Merge tag 'amd-drm-next-5.16-2021-09-27' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-5.16-2021-09-27:

amdgpu:
- RAS improvements
- BACO fixes
- Yellow Carp updates
- Misc code cleanups
- Initial DP 2.0 support
- VCN priority handling
- Cyan Skillfish updates
- Rework IB handling for multimedia engine tests
- Backlight fixes
- DCN 3.1 power saving improvements
- Runtime PM fixes
- Modifier support for DCC image stores for gfx 10.3
- Hotplug fixes
- Clean up stack related warnings in display code
- DP alt mode fixes
- Display rework for better handling of FP code
- Debugfs fixes

amdkfd:
- SVM fixes
- DMA map fixes

radeon:
- AGP fix

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210927212653.4575-1-alexander.deucher@amd.com
Signed-off-by: Dave Airlie <airlied@redhat.com>
Committed by Dave Airlie on 2021-09-28 17:08:21 +10:00 in commit 1e3944578b.
192 changed files with 10246 additions and 2596 deletions.

--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -977,12 +977,12 @@ L:	platform-driver-x86@vger.kernel.org
 S:	Maintained
 F:	drivers/platform/x86/amd-pmc.*
 
-AMD POWERPLAY
+AMD POWERPLAY AND SWSMU
 M:	Evan Quan <evan.quan@amd.com>
 L:	amd-gfx@lists.freedesktop.org
 S:	Supported
 T:	git https://gitlab.freedesktop.org/agd5f/linux.git
-F:	drivers/gpu/drm/amd/pm/powerplay/
+F:	drivers/gpu/drm/amd/pm/
 
 AMD PTDMA DRIVER
 M:	Sanjay R Mehta <sanju.mehta@amd.com>

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,14 +43,61 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
 	[AMDGPU_HW_IP_VCN_JPEG]	=	1,
 };
 
-static int amdgpu_ctx_priority_permit(struct drm_file *filp,
-				      enum drm_sched_priority priority)
+bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
 {
-	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT)
+	switch (ctx_prio) {
+	case AMDGPU_CTX_PRIORITY_UNSET:
+	case AMDGPU_CTX_PRIORITY_VERY_LOW:
+	case AMDGPU_CTX_PRIORITY_LOW:
+	case AMDGPU_CTX_PRIORITY_NORMAL:
+	case AMDGPU_CTX_PRIORITY_HIGH:
+	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static enum drm_sched_priority
+amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
+{
+	switch (ctx_prio) {
+	case AMDGPU_CTX_PRIORITY_UNSET:
+		return DRM_SCHED_PRIORITY_UNSET;
+
+	case AMDGPU_CTX_PRIORITY_VERY_LOW:
+		return DRM_SCHED_PRIORITY_MIN;
+
+	case AMDGPU_CTX_PRIORITY_LOW:
+		return DRM_SCHED_PRIORITY_MIN;
+
+	case AMDGPU_CTX_PRIORITY_NORMAL:
+		return DRM_SCHED_PRIORITY_NORMAL;
+
+	case AMDGPU_CTX_PRIORITY_HIGH:
+		return DRM_SCHED_PRIORITY_HIGH;
+
+	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+		return DRM_SCHED_PRIORITY_HIGH;
+
+	/* This should not happen as we sanitized userspace provided priority
+	 * already, WARN if this happens.
+	 */
+	default:
+		WARN(1, "Invalid context priority %d\n", ctx_prio);
+		return DRM_SCHED_PRIORITY_NORMAL;
+	}
+}
+
+static int amdgpu_ctx_priority_permit(struct drm_file *filp,
+				      int32_t priority)
+{
+	if (!amdgpu_ctx_priority_is_valid(priority))
 		return -EINVAL;
 
 	/* NORMAL and below are accessible by everyone */
-	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
+	if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
 		return 0;
 
 	if (capable(CAP_SYS_NICE))
@@ -62,26 +109,51 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
 	return -EACCES;
 }
 
-static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
+static enum amdgpu_gfx_pipe_priority amdgpu_ctx_prio_to_compute_prio(int32_t prio)
 {
 	switch (prio) {
-	case DRM_SCHED_PRIORITY_HIGH:
-	case DRM_SCHED_PRIORITY_KERNEL:
+	case AMDGPU_CTX_PRIORITY_HIGH:
+	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
 		return AMDGPU_GFX_PIPE_PRIO_HIGH;
 	default:
 		return AMDGPU_GFX_PIPE_PRIO_NORMAL;
 	}
 }
 
-static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,
-						enum drm_sched_priority prio,
-						u32 hw_ip)
+static enum amdgpu_ring_priority_level amdgpu_ctx_sched_prio_to_ring_prio(int32_t prio)
+{
+	switch (prio) {
+	case AMDGPU_CTX_PRIORITY_HIGH:
+		return AMDGPU_RING_PRIO_1;
+	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+		return AMDGPU_RING_PRIO_2;
+	default:
+		return AMDGPU_RING_PRIO_0;
+	}
+}
+
+static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
 {
+	struct amdgpu_device *adev = ctx->adev;
+	int32_t ctx_prio;
 	unsigned int hw_prio;
 
-	hw_prio = (hw_ip == AMDGPU_HW_IP_COMPUTE) ?
-			amdgpu_ctx_sched_prio_to_compute_prio(prio) :
-			AMDGPU_RING_PRIO_DEFAULT;
+	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
+			ctx->init_priority : ctx->override_priority;
+
+	switch (hw_ip) {
+	case AMDGPU_HW_IP_COMPUTE:
+		hw_prio = amdgpu_ctx_prio_to_compute_prio(ctx_prio);
+		break;
+	case AMDGPU_HW_IP_VCE:
+	case AMDGPU_HW_IP_VCN_ENC:
+		hw_prio = amdgpu_ctx_sched_prio_to_ring_prio(ctx_prio);
+		break;
+	default:
+		hw_prio = AMDGPU_RING_PRIO_DEFAULT;
+		break;
+	}
+
 	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
 	if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
 		hw_prio = AMDGPU_RING_PRIO_DEFAULT;
@@ -89,15 +161,17 @@ static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,
 	return hw_prio;
 }
 
 static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
 				  const u32 ring)
 {
 	struct amdgpu_device *adev = ctx->adev;
 	struct amdgpu_ctx_entity *entity;
 	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
 	unsigned num_scheds = 0;
+	int32_t ctx_prio;
 	unsigned int hw_prio;
-	enum drm_sched_priority priority;
+	enum drm_sched_priority drm_prio;
 	int r;
 
 	entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs),
@@ -105,10 +179,11 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
 	if (!entity)
 		return -ENOMEM;
 
+	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
+			ctx->init_priority : ctx->override_priority;
 	entity->sequence = 1;
-	priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
-				ctx->init_priority : ctx->override_priority;
-	hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority, hw_ip);
+	hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
+	drm_prio = amdgpu_ctx_to_drm_sched_prio(ctx_prio);
 
 	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
 	scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
@@ -124,7 +199,7 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
 		num_scheds = 1;
 	}
 
-	r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
+	r = drm_sched_entity_init(&entity->entity, drm_prio, scheds, num_scheds,
 				  &ctx->guilty);
 	if (r)
 		goto error_free_entity;
@@ -139,7 +214,7 @@ error_free_entity:
 }
 
 static int amdgpu_ctx_init(struct amdgpu_device *adev,
-			   enum drm_sched_priority priority,
+			   int32_t priority,
 			   struct drm_file *filp,
 			   struct amdgpu_ctx *ctx)
 {
@@ -161,7 +236,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 	ctx->reset_counter_query = ctx->reset_counter;
 	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
 	ctx->init_priority = priority;
-	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
+	ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;
 
 	return 0;
 }
@@ -234,7 +309,7 @@ int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
 static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
 			    struct amdgpu_fpriv *fpriv,
 			    struct drm_file *filp,
-			    enum drm_sched_priority priority,
+			    int32_t priority,
 			    uint32_t *id)
 {
 	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
@@ -397,19 +472,19 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 {
 	int r;
 	uint32_t id;
-	enum drm_sched_priority priority;
+	int32_t priority;
 
 	union drm_amdgpu_ctx *args = data;
 	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 
 	id = args->in.ctx_id;
-	r = amdgpu_to_sched_priority(args->in.priority, &priority);
+	priority = args->in.priority;
 
 	/* For backwards compatibility reasons, we need to accept
 	 * ioctls with garbage in the priority field */
-	if (r == -EINVAL)
-		priority = DRM_SCHED_PRIORITY_NORMAL;
+	if (!amdgpu_ctx_priority_is_valid(priority))
+		priority = AMDGPU_CTX_PRIORITY_NORMAL;
 
 	switch (args->in.op) {
 	case AMDGPU_CTX_OP_ALLOC_CTX:
@@ -515,9 +590,9 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 }
 
 static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
 					    struct amdgpu_ctx_entity *aentity,
 					    int hw_ip,
-					    enum drm_sched_priority priority)
+					    int32_t priority)
 {
 	struct amdgpu_device *adev = ctx->adev;
 	unsigned int hw_prio;
@@ -525,12 +600,12 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
 	unsigned num_scheds;
 
 	/* set sw priority */
-	drm_sched_entity_set_priority(&aentity->entity, priority);
+	drm_sched_entity_set_priority(&aentity->entity,
+				      amdgpu_ctx_to_drm_sched_prio(priority));
 
 	/* set hw priority */
 	if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
-		hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority,
-						      AMDGPU_HW_IP_COMPUTE);
+		hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
 		hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
 		scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
 		num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
@@ -540,14 +615,14 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
 }
 
 void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
-				  enum drm_sched_priority priority)
+				  int32_t priority)
 {
-	enum drm_sched_priority ctx_prio;
+	int32_t ctx_prio;
 	unsigned i, j;
 
 	ctx->override_priority = priority;
 
-	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
+	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
 			ctx->init_priority : ctx->override_priority;
 	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
 		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
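
Note: the rework keeps the raw AMDGPU_CTX_PRIORITY_* values from the UAPI all the way into the context and only converts to drm_sched priorities at entity init. From userspace the priority is simply passed through the CTX ioctl; as a minimal sketch (assuming an already-open amdgpu render-node fd; priorities above NORMAL require CAP_SYS_NICE, as enforced by amdgpu_ctx_priority_permit() above):

/* Minimal sketch: allocate a high-priority amdgpu context.
 * Invalid priority values fall back to NORMAL for backwards
 * compatibility, as handled in amdgpu_ctx_ioctl() above.
 */
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int alloc_high_prio_ctx(int fd, __u32 *ctx_id)
{
	union drm_amdgpu_ctx args = {0};

	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
	args.in.priority = AMDGPU_CTX_PRIORITY_HIGH;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_CTX, &args))
		return -1;

	*ctx_id = args.out.alloc.ctx_id;
	return 0;
}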

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -47,8 +47,8 @@ struct amdgpu_ctx {
 	spinlock_t			ring_lock;
 	struct amdgpu_ctx_entity	*entities[AMDGPU_HW_IP_NUM][AMDGPU_MAX_ENTITY_NUM];
 	bool				preamble_presented;
-	enum drm_sched_priority		init_priority;
-	enum drm_sched_priority		override_priority;
+	int32_t				init_priority;
+	int32_t				override_priority;
 	struct mutex			lock;
 	atomic_t			guilty;
 	unsigned long			ras_counter_ce;
@@ -75,8 +75,8 @@ void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
 struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 				       struct drm_sched_entity *entity,
 				       uint64_t seq);
-void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
-				  enum drm_sched_priority priority);
+bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio);
+void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, int32_t ctx_prio);
 
 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 		     struct drm_file *filp);

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -36,6 +36,7 @@
 #include "amdgpu_rap.h"
 #include "amdgpu_securedisplay.h"
 #include "amdgpu_fw_attestation.h"
+#include "amdgpu_umr.h"
 
 int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev)
 {
@@ -279,6 +280,145 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
 	return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
 }
 
+static int amdgpu_debugfs_regs2_open(struct inode *inode, struct file *file)
+{
+	struct amdgpu_debugfs_regs2_data *rd;
+
+	rd = kzalloc(sizeof *rd, GFP_KERNEL);
+	if (!rd)
+		return -ENOMEM;
+	rd->adev = file_inode(file)->i_private;
+	file->private_data = rd;
+	mutex_init(&rd->lock);
+
+	return 0;
+}
+
+static int amdgpu_debugfs_regs2_release(struct inode *inode, struct file *file)
+{
+	struct amdgpu_debugfs_regs2_data *rd = file->private_data;
+
+	mutex_destroy(&rd->lock);
+	kfree(file->private_data);
+	return 0;
+}
+
+static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 offset, size_t size, int write_en)
+{
+	struct amdgpu_debugfs_regs2_data *rd = f->private_data;
+	struct amdgpu_device *adev = rd->adev;
+	ssize_t result = 0;
+	int r;
+	uint32_t value;
+
+	if (size & 0x3 || offset & 0x3)
+		return -EINVAL;
+
+	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+		return r;
+	}
+
+	r = amdgpu_virt_enable_access_debugfs(adev);
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+		return r;
+	}
+
+	mutex_lock(&rd->lock);
+
+	if (rd->id.use_grbm) {
+		if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= adev->gfx.config.max_sh_per_se) ||
+		    (rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) {
+			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+			amdgpu_virt_disable_access_debugfs(adev);
+			mutex_unlock(&rd->lock);
+			return -EINVAL;
+		}
+		mutex_lock(&adev->grbm_idx_mutex);
+		amdgpu_gfx_select_se_sh(adev, rd->id.grbm.se,
+					rd->id.grbm.sh,
+					rd->id.grbm.instance);
+	}
+
+	if (rd->id.use_srbm) {
+		mutex_lock(&adev->srbm_mutex);
+		amdgpu_gfx_select_me_pipe_q(adev, rd->id.srbm.me, rd->id.srbm.pipe,
+					    rd->id.srbm.queue, rd->id.srbm.vmid);
+	}
+
+	if (rd->id.pg_lock)
+		mutex_lock(&adev->pm.mutex);
+
+	while (size) {
+		if (!write_en) {
+			value = RREG32(offset >> 2);
+			r = put_user(value, (uint32_t *)buf);
+		} else {
+			r = get_user(value, (uint32_t *)buf);
+			if (!r)
+				amdgpu_mm_wreg_mmio_rlc(adev, offset >> 2, value);
+		}
+		if (r) {
+			result = r;
+			goto end;
+		}
+		offset += 4;
+		size -= 4;
+		result += 4;
+		buf += 4;
+	}
+
+end:
+	if (rd->id.use_grbm) {
+		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+		mutex_unlock(&adev->grbm_idx_mutex);
+	}
+
+	if (rd->id.use_srbm) {
+		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0);
+		mutex_unlock(&adev->srbm_mutex);
+	}
+
+	if (rd->id.pg_lock)
+		mutex_unlock(&adev->pm.mutex);
+
+	mutex_unlock(&rd->lock);
+
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+	amdgpu_virt_disable_access_debugfs(adev);
+	return result;
+}
+
+static long amdgpu_debugfs_regs2_ioctl(struct file *f, unsigned int cmd, unsigned long data)
+{
+	struct amdgpu_debugfs_regs2_data *rd = f->private_data;
+	int r;
+
+	switch (cmd) {
+	case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE:
+		mutex_lock(&rd->lock);
+		r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata *)data, sizeof rd->id);
+		mutex_unlock(&rd->lock);
+		return r ? -EINVAL : 0;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static ssize_t amdgpu_debugfs_regs2_read(struct file *f, char __user *buf, size_t size, loff_t *pos)
+{
+	return amdgpu_debugfs_regs2_op(f, buf, *pos, size, 0);
+}
+
+static ssize_t amdgpu_debugfs_regs2_write(struct file *f, const char __user *buf, size_t size, loff_t *pos)
+{
+	return amdgpu_debugfs_regs2_op(f, (char __user *)buf, *pos, size, 1);
+}
+
 
 /**
  * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register
@@ -1091,6 +1231,16 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
 	return result;
 }
 
+static const struct file_operations amdgpu_debugfs_regs2_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = amdgpu_debugfs_regs2_ioctl,
+	.read = amdgpu_debugfs_regs2_read,
+	.write = amdgpu_debugfs_regs2_write,
+	.open = amdgpu_debugfs_regs2_open,
+	.release = amdgpu_debugfs_regs2_release,
+	.llseek = default_llseek
+};
+
 static const struct file_operations amdgpu_debugfs_regs_fops = {
 	.owner = THIS_MODULE,
 	.read = amdgpu_debugfs_regs_read,
@@ -1148,6 +1298,7 @@ static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
 
 static const struct file_operations *debugfs_regs[] = {
 	&amdgpu_debugfs_regs_fops,
+	&amdgpu_debugfs_regs2_fops,
 	&amdgpu_debugfs_regs_didt_fops,
 	&amdgpu_debugfs_regs_pcie_fops,
 	&amdgpu_debugfs_regs_smc_fops,
@@ -1160,6 +1311,7 @@ static const struct file_operations *debugfs_regs[] = {
 
 static const char *debugfs_regs_names[] = {
 	"amdgpu_regs",
+	"amdgpu_regs2",
 	"amdgpu_regs_didt",
 	"amdgpu_regs_pcie",
 	"amdgpu_regs_smc",
@@ -1206,7 +1358,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
 	}
 
 	/* Avoid accidently unparking the sched thread during GPU reset */
-	r = down_read_killable(&adev->reset_sem);
+	r = down_write_killable(&adev->reset_sem);
 	if (r)
 		return r;
@@ -1235,7 +1387,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
 		kthread_unpark(ring->sched.thread);
 	}
 
-	up_read(&adev->reset_sem);
+	up_write(&adev->reset_sem);
 
 	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_put_autosuspend(dev->dev);
@@ -1582,9 +1734,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 		if (!ring)
 			continue;
 
-		if (amdgpu_debugfs_ring_init(adev, ring)) {
-			DRM_ERROR("Failed to register debugfs file for rings !\n");
-		}
+		amdgpu_debugfs_ring_init(adev, ring);
 	}
 
 	amdgpu_ras_debugfs_create_all(adev);
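
Note: the new amdgpu_regs2 debugfs file pairs a state-setting ioctl (optional GRBM/SRBM bank selection plus the PG lock) with ordinary pread()/pwrite() at the register byte offset. A rough userspace sketch of a banked read follows; the iocdata layout and the AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE macro come from the new amdgpu_umr.h header (assumed here), and the field names mirror the ones used in amdgpu_debugfs_regs2_op() above:

/* Rough sketch: read one dword via amdgpu_regs2 with GRBM banking.
 * Offsets are in bytes and must be 4-byte aligned, matching the
 * (size & 0x3 || offset & 0x3) check in the kernel.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "amdgpu_umr.h"		/* struct amdgpu_debugfs_regs2_iocdata */

static int regs2_read_banked(int fd, uint32_t byte_off,
			     uint32_t se, uint32_t sh, uint32_t instance,
			     uint32_t *value)
{
	struct amdgpu_debugfs_regs2_iocdata id = {0};

	id.use_grbm = 1;
	id.grbm.se = se;	/* 0xFFFFFFFF selects broadcast */
	id.grbm.sh = sh;
	id.grbm.instance = instance;

	if (ioctl(fd, AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE, &id))
		return -1;

	return pread(fd, value, 4, byte_off) == 4 ? 0 : -1;
}

The fd would come from opening /sys/kernel/debug/dri/<n>/amdgpu_regs2, the name registered in debugfs_regs_names above.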

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -22,7 +22,6 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-
 /*
  * Debugfs
  */

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2745,6 +2745,11 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
 		adev->ip_blocks[i].status.hw = false;
 	}
 
+	if (amdgpu_sriov_vf(adev)) {
+		if (amdgpu_virt_release_full_gpu(adev, false))
+			DRM_ERROR("failed to release exclusive mode on fini\n");
+	}
+
 	return 0;
 }
@@ -2805,10 +2810,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 
 	amdgpu_ras_fini(adev);
 
-	if (amdgpu_sriov_vf(adev))
-		if (amdgpu_virt_release_full_gpu(adev, false))
-			DRM_ERROR("failed to release exclusive mode on fini\n");
-
 	return 0;
 }
@@ -3538,17 +3539,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
 	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
 
-	/* enable PCIE atomic ops */
-	r = pci_enable_atomic_ops_to_root(adev->pdev,
-					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
-					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
-	if (r) {
-		adev->have_atomics_support = false;
-		DRM_INFO("PCIE atomic ops is not supported\n");
-	} else {
-		adev->have_atomics_support = true;
-	}
-
 	amdgpu_device_get_pcie_info(adev);
 
 	if (amdgpu_mcbp)
@@ -3571,6 +3561,19 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	if (r)
 		return r;
 
+	/* enable PCIE atomic ops */
+	if (amdgpu_sriov_vf(adev))
+		adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
+			adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_enabled_flags ==
+			(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+	else
+		adev->have_atomics_support =
+			!pci_enable_atomic_ops_to_root(adev->pdev,
+					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
+					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+	if (!adev->have_atomics_support)
+		dev_info(adev->dev, "PCIE atomic ops is not supported\n");
+
 	/* doorbell bar mapping and doorbell index init*/
 	amdgpu_device_doorbell_init(adev);
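
Note: moving the PCIe atomics check later in amdgpu_device_init lets an SR-IOV VF consult the pcie_atomic_ops_enabled_flags published by the host in the pf2vf structure, instead of probing a virtualized PCIe topology. On bare metal the probe is unchanged: pci_enable_atomic_ops_to_root() returns 0 when the path to the root port supports the requested AtomicOp completers, so the result folds directly into a boolean (a condensed sketch of that branch):

/* Condensed sketch of the bare-metal branch above: a zero return from
 * pci_enable_atomic_ops_to_root() means 32- and 64-bit PCIe atomics
 * are usable, so the negation yields have_atomics_support directly.
 */
#include <linux/pci.h>

static bool pcie_atomics_supported(struct pci_dev *pdev)
{
	return !pci_enable_atomic_ops_to_root(pdev,
					      PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
					      PCI_EXP_DEVCAP2_ATOMIC_COMP64);
}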

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1508,6 +1508,10 @@ static int amdgpu_pmops_resume(struct device *dev)
 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
 	int r;
 
+	/* Avoids registers access if device is physically gone */
+	if (!pci_device_is_present(adev->pdev))
+		adev->no_hw_access = true;
+
 	r = amdgpu_device_resume(drm_dev, true);
 	if (amdgpu_acpi_is_s0ix_active(adev))
 		adev->in_s0ix = false;

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -34,6 +34,7 @@
 #include <asm/set_memory.h>
 #endif
 #include "amdgpu.h"
+#include <drm/drm_drv.h>
 
 /*
  * GART
@@ -230,12 +231,16 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 	u64 page_base;
 	/* Starting from VEGA10, system bit must be 0 to mean invalid. */
 	uint64_t flags = 0;
+	int idx;
 
 	if (!adev->gart.ready) {
 		WARN(1, "trying to unbind memory from uninitialized GART !\n");
 		return -EINVAL;
 	}
 
+	if (!drm_dev_enter(&adev->ddev, &idx))
+		return 0;
+
 	t = offset / AMDGPU_GPU_PAGE_SIZE;
 	p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 	for (i = 0; i < pages; i++, p++) {
@@ -254,6 +259,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 	for (i = 0; i < adev->num_vmhubs; i++)
 		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
 
+	drm_dev_exit(idx);
 	return 0;
 }
@@ -276,12 +282,16 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
 {
 	uint64_t page_base;
 	unsigned i, j, t;
+	int idx;
 
 	if (!adev->gart.ready) {
 		WARN(1, "trying to bind memory to uninitialized GART !\n");
 		return -EINVAL;
 	}
 
+	if (!drm_dev_enter(&adev->ddev, &idx))
+		return 0;
+
 	t = offset / AMDGPU_GPU_PAGE_SIZE;
 
 	for (i = 0; i < pages; i++) {
@@ -291,6 +301,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
 			page_base += AMDGPU_GPU_PAGE_SIZE;
 		}
 	}
+	drm_dev_exit(idx);
 	return 0;
 }

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -60,10 +60,9 @@ static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
 			goto unlock;
 		}
 
 		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
 					       TTM_BO_VM_NUM_PREFAULT, 1);
-
 		drm_dev_exit(idx);
 	} else {
 		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
 	}

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -42,10 +42,9 @@
 #define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES
 #define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
 
-enum gfx_pipe_priority {
-	AMDGPU_GFX_PIPE_PRIO_NORMAL = 1,
-	AMDGPU_GFX_PIPE_PRIO_HIGH,
-	AMDGPU_GFX_PIPE_PRIO_MAX
+enum amdgpu_gfx_pipe_priority {
+	AMDGPU_GFX_PIPE_PRIO_NORMAL = AMDGPU_RING_PRIO_1,
+	AMDGPU_GFX_PIPE_PRIO_HIGH = AMDGPU_RING_PRIO_2
 };
 
 /* Argument for PPSMC_MSG_GpuChangeState */

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -153,10 +153,6 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
 {
 	void __iomem *ptr = (void *)cpu_pt_addr;
 	uint64_t value;
-	int idx;
-
-	if (!drm_dev_enter(&adev->ddev, &idx))
-		return 0;
 
 	/*
 	 * The following is for PTE only. GART does not have PDEs.
@@ -165,8 +161,6 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
 	value |= flags;
 	writeq(value, ptr + (gpu_page_idx * 8));
 
-	drm_dev_exit(idx);
-
 	return 0;
 }
@@ -749,6 +743,10 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
 		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
 	u64 vram_end = vram_addr + vram_size;
 	u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo);
+	int idx;
+
+	if (!drm_dev_enter(&adev->ddev, &idx))
+		return;
 
 	flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
 	flags |= AMDGPU_PTE_WRITEABLE;
@@ -770,6 +768,7 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
 	flags |= AMDGPU_PDE_BFS(0) | AMDGPU_PTE_SNOOPED;
 	/* Requires gart_ptb_gpu_pa to be 4K aligned */
 	amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags);
+	drm_dev_exit(idx);
 }
 
 /**

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -300,20 +300,15 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
  */
 int amdgpu_ib_pool_init(struct amdgpu_device *adev)
 {
-	unsigned size;
 	int r, i;
 
 	if (adev->ib_pool_ready)
 		return 0;
 
 	for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
-		if (i == AMDGPU_IB_POOL_DIRECT)
-			size = PAGE_SIZE * 6;
-		else
-			size = AMDGPU_IB_POOL_SIZE;
-
 		r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i],
-					      size, AMDGPU_GPU_PAGE_SIZE,
+					      AMDGPU_IB_POOL_SIZE,
+					      AMDGPU_GPU_PAGE_SIZE,
 					      AMDGPU_GEM_DOMAIN_GTT);
 		if (r)
 			goto error;

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -341,27 +341,34 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
 		switch (query_fw->index) {
 		case TA_FW_TYPE_PSP_XGMI:
 			fw_info->ver = adev->psp.ta_fw_version;
-			fw_info->feature = adev->psp.xgmi.feature_version;
+			fw_info->feature = adev->psp.xgmi_context.context
+						   .bin_desc.feature_version;
 			break;
 		case TA_FW_TYPE_PSP_RAS:
 			fw_info->ver = adev->psp.ta_fw_version;
-			fw_info->feature = adev->psp.ras.feature_version;
+			fw_info->feature = adev->psp.ras_context.context
+						   .bin_desc.feature_version;
 			break;
 		case TA_FW_TYPE_PSP_HDCP:
 			fw_info->ver = adev->psp.ta_fw_version;
-			fw_info->feature = adev->psp.hdcp.feature_version;
+			fw_info->feature = adev->psp.hdcp_context.context
+						   .bin_desc.feature_version;
 			break;
 		case TA_FW_TYPE_PSP_DTM:
 			fw_info->ver = adev->psp.ta_fw_version;
-			fw_info->feature = adev->psp.dtm.feature_version;
+			fw_info->feature = adev->psp.dtm_context.context
+						   .bin_desc.feature_version;
 			break;
 		case TA_FW_TYPE_PSP_RAP:
 			fw_info->ver = adev->psp.ta_fw_version;
-			fw_info->feature = adev->psp.rap.feature_version;
+			fw_info->feature = adev->psp.rap_context.context
+						   .bin_desc.feature_version;
 			break;
 		case TA_FW_TYPE_PSP_SECUREDISPLAY:
 			fw_info->ver = adev->psp.ta_fw_version;
-			fw_info->feature = adev->psp.securedisplay.feature_version;
+			fw_info->feature =
+				adev->psp.securedisplay_context.context.bin_desc
+					.feature_version;
 			break;
 		default:
 			return -EINVAL;
@@ -378,8 +385,8 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
 		fw_info->feature = adev->psp.sos.feature_version;
 		break;
 	case AMDGPU_INFO_FW_ASD:
-		fw_info->ver = adev->psp.asd.fw_version;
-		fw_info->feature = adev->psp.asd.feature_version;
+		fw_info->ver = adev->psp.asd_context.bin_desc.fw_version;
+		fw_info->feature = adev->psp.asd_context.bin_desc.feature_version;
 		break;
 	case AMDGPU_INFO_FW_DMCU:
 		fw_info->ver = adev->dm.dmcu_fw_version;

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -31,7 +31,7 @@ void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
 					      uint64_t mc_status_addr,
 					      unsigned long *error_count)
 {
-	uint64_t mc_status = RREG64_PCIE(mc_status_addr * 4);
+	uint64_t mc_status = RREG64_PCIE(mc_status_addr);
 
 	if (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
 	    REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
@@ -42,7 +42,7 @@ void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
 						uint64_t mc_status_addr,
 						unsigned long *error_count)
 {
-	uint64_t mc_status = RREG64_PCIE(mc_status_addr * 4);
+	uint64_t mc_status = RREG64_PCIE(mc_status_addr);
 
 	if ((REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
 	    (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
@@ -56,7 +56,7 @@ void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
 void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
 				  uint64_t mc_status_addr)
 {
-	WREG64_PCIE(mc_status_addr * 4, 0x0ULL);
+	WREG64_PCIE(mc_status_addr, 0x0ULL);
 }
 
 void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
@@ -87,8 +87,8 @@ int amdgpu_mca_ras_late_init(struct amdgpu_device *adev,
 		if (!mca_dev->ras_if)
 			return -ENOMEM;
 		mca_dev->ras_if->block = mca_dev->ras_funcs->ras_block;
+		mca_dev->ras_if->sub_block_index = mca_dev->ras_funcs->ras_sub_block;
 		mca_dev->ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
-		mca_dev->ras_if->sub_block_index = 0;
 	}
 	ih_info.head = fs_info.head = *mca_dev->ras_if;
 	r = amdgpu_ras_late_init(adev, mca_dev->ras_if,

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
@@ -29,6 +29,7 @@ struct amdgpu_mca_ras_funcs {
 	void (*query_ras_error_address)(struct amdgpu_device *adev,
 					void *ras_error_status);
 	uint32_t ras_block;
+	uint32_t ras_sub_block;
 	const char* sysfs_name;
 };

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -694,40 +694,6 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
 	return r;
 }
 
-/**
- * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
- * @bo: pointer to the buffer object
- *
- * Sets placement according to domain; and changes placement and caching
- * policy of the buffer object according to the placement.
- * This is used for validating shadow bos.  It calls ttm_bo_validate() to
- * make sure the buffer is resident where it needs to be.
- *
- * Returns:
- * 0 for success or a negative error code on failure.
- */
-int amdgpu_bo_validate(struct amdgpu_bo *bo)
-{
-	struct ttm_operation_ctx ctx = { false, false };
-	uint32_t domain;
-	int r;
-
-	if (bo->tbo.pin_count)
-		return 0;
-
-	domain = bo->preferred_domains;
-
-retry:
-	amdgpu_bo_placement_from_domain(bo, domain);
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
-		domain = bo->allowed_domains;
-		goto retry;
-	}
-
-	return r;
-}
-
 /**
  * amdgpu_bo_add_to_shadow_list - add a BO to the shadow list
 *

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -327,7 +327,6 @@ int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
 int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
 u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
-int amdgpu_bo_validate(struct amdgpu_bo *bo);
 void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
 			  uint64_t *gtt_mem, uint64_t *cpu_mem);
 void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo);

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -46,6 +46,10 @@ static int psp_sysfs_init(struct amdgpu_device *adev);
 static void psp_sysfs_fini(struct amdgpu_device *adev);
 
 static int psp_load_smu_fw(struct psp_context *psp);
+static int psp_ta_unload(struct psp_context *psp, struct ta_context *context);
+static int psp_ta_load(struct psp_context *psp, struct ta_context *context);
+static int psp_rap_terminate(struct psp_context *psp);
+static int psp_securedisplay_terminate(struct psp_context *psp);
 
 /*
  * Due to DF Cstate management centralized to PMFW, the firmware
@@ -778,46 +782,29 @@ static int psp_rl_load(struct amdgpu_device *adev)
 	return ret;
 }
 
-static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-				uint64_t asd_mc, uint32_t size)
+static int psp_asd_load(struct psp_context *psp)
 {
-	cmd->cmd_id = GFX_CMD_ID_LOAD_ASD;
-	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc);
-	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc);
-	cmd->cmd.cmd_load_ta.app_len = size;
-
-	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0;
-	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0;
-	cmd->cmd.cmd_load_ta.cmd_buf_len = 0;
+	return psp_ta_load(psp, &psp->asd_context);
 }
 
-static int psp_asd_load(struct psp_context *psp)
+static int psp_asd_initialize(struct psp_context *psp)
 {
 	int ret;
-	struct psp_gfx_cmd_resp *cmd;
 
 	/* If PSP version doesn't match ASD version, asd loading will be failed.
 	 * add workaround to bypass it for sriov now.
 	 * TODO: add version check to make it common
 	 */
-	if (amdgpu_sriov_vf(psp->adev) || !psp->asd.size_bytes)
+	if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
 		return 0;
 
-	cmd = acquire_psp_cmd_buf(psp);
-
-	psp_copy_fw(psp, psp->asd.start_addr, psp->asd.size_bytes);
-
-	psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
-				  psp->asd.size_bytes);
-
-	ret = psp_cmd_submit_buf(psp, NULL, cmd,
-				 psp->fence_buf_mc_addr);
-	if (!ret) {
-		psp->asd_context.asd_initialized = true;
-		psp->asd_context.session_id = cmd->resp.session_id;
-	}
+	psp->asd_context.mem_context.shared_mc_addr = 0;
+	psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
+	psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD;
 
-	release_psp_cmd_buf(psp);
+	ret = psp_asd_load(psp);
+	if (!ret)
+		psp->asd_context.initialized = true;
 
 	return ret;
 }
@@ -829,27 +816,39 @@ static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
 	cmd->cmd.cmd_unload_ta.session_id = session_id;
 }
 
+static int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
+{
+	int ret;
+	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
+
+	psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
+
+	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+	release_psp_cmd_buf(psp);
+
+	return ret;
+}
+
 static int psp_asd_unload(struct psp_context *psp)
+{
+	return psp_ta_unload(psp, &psp->asd_context);
+}
+
+static int psp_asd_terminate(struct psp_context *psp)
 {
 	int ret;
-	struct psp_gfx_cmd_resp *cmd;
 
 	if (amdgpu_sriov_vf(psp->adev))
 		return 0;
 
-	if (!psp->asd_context.asd_initialized)
+	if (!psp->asd_context.initialized)
 		return 0;
 
-	cmd = acquire_psp_cmd_buf(psp);
-
-	psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id);
-
-	ret = psp_cmd_submit_buf(psp, NULL, cmd,
-				 psp->fence_buf_mc_addr);
+	ret = psp_asd_unload(psp);
 	if (!ret)
-		psp->asd_context.asd_initialized = false;
+		psp->asd_context.initialized = false;
 
-	release_psp_cmd_buf(psp);
-
 	return ret;
 }
@@ -885,23 +884,22 @@ int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
 
 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
 				     uint64_t ta_bin_mc,
-				     uint32_t ta_bin_size,
-				     uint64_t ta_shared_mc,
-				     uint32_t ta_shared_size)
+				     struct ta_context *context)
 {
-	cmd->cmd_id				= GFX_CMD_ID_LOAD_TA;
+	cmd->cmd_id				= context->ta_load_type;
 	cmd->cmd.cmd_load_ta.app_phy_addr_lo	= lower_32_bits(ta_bin_mc);
 	cmd->cmd.cmd_load_ta.app_phy_addr_hi	= upper_32_bits(ta_bin_mc);
-	cmd->cmd.cmd_load_ta.app_len		= ta_bin_size;
+	cmd->cmd.cmd_load_ta.app_len		= context->bin_desc.size_bytes;
 
-	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ta_shared_mc);
-	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ta_shared_mc);
-	cmd->cmd.cmd_load_ta.cmd_buf_len	 = ta_shared_size;
+	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
+		lower_32_bits(context->mem_context.shared_mc_addr);
+	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
+		upper_32_bits(context->mem_context.shared_mc_addr);
+	cmd->cmd.cmd_load_ta.cmd_buf_len	 = context->mem_context.shared_mem_size;
 }
 
 static int psp_ta_init_shared_buf(struct psp_context *psp,
-				  struct ta_mem_context *mem_ctx,
-				  uint32_t shared_mem_size)
+				  struct ta_mem_context *mem_ctx)
 {
 	int ret;
@@ -909,8 +907,8 @@ static int psp_ta_init_shared_buf(struct psp_context *psp,
 	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
 	 * physical) for ta to host memory
 	 */
-	ret = amdgpu_bo_create_kernel(psp->adev, shared_mem_size, PAGE_SIZE,
-				      AMDGPU_GEM_DOMAIN_VRAM,
+	ret = amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
+				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
 				      &mem_ctx->shared_bo,
 				      &mem_ctx->shared_mc_addr,
 				      &mem_ctx->shared_buf);
@@ -926,8 +924,7 @@ static void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
 
 static int psp_xgmi_init_shared_buf(struct psp_context *psp)
 {
-	return psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context,
-				      PSP_XGMI_SHARED_MEM_SIZE);
+	return psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
 }
 
 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
@@ -956,31 +953,23 @@ static int psp_ta_invoke(struct psp_context *psp,
 	return ret;
 }
 
-static int psp_xgmi_load(struct psp_context *psp)
+static int psp_ta_load(struct psp_context *psp, struct ta_context *context)
 {
 	int ret;
 	struct psp_gfx_cmd_resp *cmd;
 
-	/*
-	 * TODO: bypass the loading in sriov for now
-	 */
-
 	cmd = acquire_psp_cmd_buf(psp);
 
-	psp_copy_fw(psp, psp->xgmi.start_addr, psp->xgmi.size_bytes);
+	psp_copy_fw(psp, context->bin_desc.start_addr,
+		    context->bin_desc.size_bytes);
 
-	psp_prep_ta_load_cmd_buf(cmd,
-				 psp->fw_pri_mc_addr,
-				 psp->xgmi.size_bytes,
-				 psp->xgmi_context.context.mem_context.shared_mc_addr,
-				 PSP_XGMI_SHARED_MEM_SIZE);
+	psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
 
 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
 				 psp->fence_buf_mc_addr);
 
 	if (!ret) {
-		psp->xgmi_context.context.initialized = true;
-		psp->xgmi_context.context.session_id = cmd->resp.session_id;
+		context->session_id = cmd->resp.session_id;
 	}
 
 	release_psp_cmd_buf(psp);
@@ -988,31 +977,14 @@ static int psp_xgmi_load(struct psp_context *psp)
 	return ret;
 }
 
+static int psp_xgmi_load(struct psp_context *psp)
+{
+	return psp_ta_load(psp, &psp->xgmi_context.context);
+}
+
 static int psp_xgmi_unload(struct psp_context *psp)
 {
-	int ret;
-	struct psp_gfx_cmd_resp *cmd;
-	struct amdgpu_device *adev = psp->adev;
-
-	/* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
-	if (adev->asic_type == CHIP_ARCTURUS ||
-	    (adev->asic_type == CHIP_ALDEBARAN && adev->gmc.xgmi.connected_to_cpu))
-		return 0;
-
-	/*
-	 * TODO: bypass the unloading in sriov for now
-	 */
-
-	cmd = acquire_psp_cmd_buf(psp);
-
-	psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.context.session_id);
-
-	ret = psp_cmd_submit_buf(psp, NULL, cmd,
-				 psp->fence_buf_mc_addr);
-
-	release_psp_cmd_buf(psp);
-
-	return ret;
+	return psp_ta_unload(psp, &psp->xgmi_context.context);
 }
 
 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
@@ -1023,6 +995,12 @@ int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 int psp_xgmi_terminate(struct psp_context *psp)
 {
 	int ret;
+	struct amdgpu_device *adev = psp->adev;
+
+	/* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
+	if (adev->asic_type == CHIP_ARCTURUS ||
+	    (adev->asic_type == CHIP_ALDEBARAN && adev->gmc.xgmi.connected_to_cpu))
+		return 0;
 
 	if (!psp->xgmi_context.context.initialized)
 		return 0;
@@ -1045,13 +1023,16 @@ int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool lo
 	int ret;
 
 	if (!psp->ta_fw ||
-	    !psp->xgmi.size_bytes ||
-	    !psp->xgmi.start_addr)
+	    !psp->xgmi_context.context.bin_desc.size_bytes ||
+	    !psp->xgmi_context.context.bin_desc.start_addr)
 		return -ENOENT;
 
 	if (!load_ta)
 		goto invoke;
 
+	psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
+	psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+
 	if (!psp->xgmi_context.context.initialized) {
 		ret = psp_xgmi_init_shared_buf(psp);
 		if (ret)
@@ -1060,7 +1041,9 @@ int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool lo
 
 	/* Load XGMI TA */
 	ret = psp_xgmi_load(psp);
-	if (ret)
+	if (!ret)
+		psp->xgmi_context.context.initialized = true;
+	else
 		return ret;
 
 invoke:
@@ -1118,7 +1101,7 @@ int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
 {
 	return psp->adev->asic_type == CHIP_ALDEBARAN &&
-		psp->xgmi.feature_version >= 0x2000000b;
+		psp->xgmi_context.context.bin_desc.feature_version >= 0x2000000b;
 }
 
 /*
@@ -1282,80 +1265,17 @@ int psp_xgmi_set_topology_info(struct psp_context *psp,
 // ras begin
 static int psp_ras_init_shared_buf(struct psp_context *psp)
 {
-	return psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context,
-				      PSP_RAS_SHARED_MEM_SIZE);
+	return psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
 }
 
 static int psp_ras_load(struct psp_context *psp)
 {
-	int ret;
-	struct psp_gfx_cmd_resp *cmd;
-	struct ta_ras_shared_memory *ras_cmd;
-
-	/*
-	 * TODO: bypass the loading in sriov for now
-	 */
-	if (amdgpu_sriov_vf(psp->adev))
-		return 0;
-
-	psp_copy_fw(psp, psp->ras.start_addr, psp->ras.size_bytes);
-
-	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
-
-	if (psp->adev->gmc.xgmi.connected_to_cpu)
-		ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
-	else
-		ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
-
-	cmd = acquire_psp_cmd_buf(psp);
-
-	psp_prep_ta_load_cmd_buf(cmd,
-				 psp->fw_pri_mc_addr,
-				 psp->ras.size_bytes,
-				 psp->ras_context.context.mem_context.shared_mc_addr,
-				 PSP_RAS_SHARED_MEM_SIZE);
-
-	ret = psp_cmd_submit_buf(psp, NULL, cmd,
-				 psp->fence_buf_mc_addr);
-
-	if (!ret) {
-		psp->ras_context.context.session_id = cmd->resp.session_id;
-
-		if (!ras_cmd->ras_status)
-			psp->ras_context.context.initialized = true;
-		else
-			dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
-	}
-
-	release_psp_cmd_buf(psp);
-
-	if (ret || ras_cmd->ras_status)
-		amdgpu_ras_fini(psp->adev);
-
-	return ret;
+	return psp_ta_load(psp, &psp->ras_context.context);
 }
 
 static int psp_ras_unload(struct psp_context *psp)
 {
-	int ret;
-	struct psp_gfx_cmd_resp *cmd;
-
-	/*
-	 * TODO: bypass the unloading in sriov for now
-	 */
-	if (amdgpu_sriov_vf(psp->adev))
-		return 0;
-
-	cmd = acquire_psp_cmd_buf(psp);
-
-	psp_prep_ta_unload_cmd_buf(cmd, psp->ras_context.context.session_id);
-
-	ret = psp_cmd_submit_buf(psp, NULL, cmd,
-				 psp->fence_buf_mc_addr);
-
-	release_psp_cmd_buf(psp);
-
-	return ret;
+	return psp_ta_unload(psp, &psp->ras_context.context);
 }
 
 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
@@ -1391,31 +1311,11 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 	else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
 		dev_warn(psp->adev->dev,
 			 "RAS internal register access blocked\n");
-	}
 
-	return ret;
-}
-
-static int psp_ras_status_to_errno(struct amdgpu_device *adev,
-					  enum ta_ras_status ras_status)
-{
-	int ret = -EINVAL;
-
-	switch (ras_status) {
-	case TA_RAS_STATUS__SUCCESS:
-		ret = 0;
-		break;
-	case TA_RAS_STATUS__RESET_NEEDED:
-		ret = -EAGAIN;
-		break;
-	case TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE:
-		dev_warn(adev->dev, "RAS WARN: ras function unavailable\n");
-		break;
-	case TA_RAS_STATUS__ERROR_ASD_READ_WRITE:
-		dev_warn(adev->dev, "RAS WARN: asd read or write failed\n");
-		break;
-	default:
-		dev_err(adev->dev, "RAS ERROR: ras function failed ret 0x%X\n", ret);
+	if (ras_cmd->ras_status == TA_RAS_STATUS__ERROR_UNSUPPORTED_IP)
+		dev_warn(psp->adev->dev, "RAS WARNING: cmd failed due to unsupported ip\n");
+	else if (ras_cmd->ras_status)
+		dev_warn(psp->adev->dev, "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
 	}
 
 	return ret;
@@ -1444,7 +1344,7 @@ int psp_ras_enable_features(struct psp_context *psp,
 	if (ret)
 		return -EINVAL;
 
-	return psp_ras_status_to_errno(psp->adev, ras_cmd->ras_status);
+	return 0;
 }
 
 static int psp_ras_terminate(struct psp_context *psp)
@@ -1477,6 +1377,7 @@ static int psp_ras_initialize(struct psp_context *psp)
 	int ret;
 	uint32_t boot_cfg = 0xFF;
 	struct amdgpu_device *adev = psp->adev;
+	struct ta_ras_shared_memory *ras_cmd;
 
 	/*
 	 * TODO: bypass the initialize in sriov for now
@@ -1484,8 +1385,8 @@ static int psp_ras_initialize(struct psp_context *psp)
 	if (amdgpu_sriov_vf(adev))
 		return 0;
 
-	if (!adev->psp.ras.size_bytes ||
-	    !adev->psp.ras.start_addr) {
+	if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
+	    !adev->psp.ras_context.context.bin_desc.start_addr) {
 		dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
 		return 0;
 	}
@@ -1531,17 +1432,34 @@ static int psp_ras_initialize(struct psp_context *psp)
 		}
 	}
 
+	psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
+	psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+
 	if (!psp->ras_context.context.initialized) {
 		ret = psp_ras_init_shared_buf(psp);
 		if (ret)
 			return ret;
 	}
 
-	ret = psp_ras_load(psp);
-	if (ret)
-		return ret;
+	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
+	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
 
-	return 0;
+	if (psp->adev->gmc.xgmi.connected_to_cpu)
+		ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
+	else
+		ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
+
+	ret = psp_ras_load(psp);
+
+	if (!ret && !ras_cmd->ras_status)
+		psp->ras_context.context.initialized = true;
+	else {
+		if (ras_cmd->ras_status)
+			dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
+
+		amdgpu_ras_fini(psp->adev);
+	}
+
+	return ret;
 }
 
 int psp_ras_trigger_error(struct psp_context *psp,
@@ -1568,51 +1486,24 @@ int psp_ras_trigger_error(struct psp_context *psp,
 	if (amdgpu_ras_intr_triggered())
 		return 0;
 
-	return psp_ras_status_to_errno(psp->adev, ras_cmd->ras_status);
+	if (ras_cmd->ras_status)
+		return -EINVAL;
+
+	return 0;
 }
 // ras end
 
 // HDCP start
 static int psp_hdcp_init_shared_buf(struct psp_context *psp)
 {
-	return psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context,
-				      PSP_HDCP_SHARED_MEM_SIZE);
+	return psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
 }
 
 static int psp_hdcp_load(struct psp_context *psp)
 {
-	int ret;
-	struct psp_gfx_cmd_resp *cmd;
-
-	/*
-	 * TODO: bypass the loading in sriov for now
-	 */
-	if (amdgpu_sriov_vf(psp->adev))
-		return 0;
-
-	psp_copy_fw(psp, psp->hdcp.start_addr,
-		    psp->hdcp.size_bytes);
-
-	cmd = acquire_psp_cmd_buf(psp);
-
-	psp_prep_ta_load_cmd_buf(cmd,
-				 psp->fw_pri_mc_addr,
-				 psp->hdcp.size_bytes,
-				 psp->hdcp_context.context.mem_context.shared_mc_addr,
-				 PSP_HDCP_SHARED_MEM_SIZE);
-
-	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
-	if (!ret) {
-		psp->hdcp_context.context.initialized = true;
-		psp->hdcp_context.context.session_id = cmd->resp.session_id;
-		mutex_init(&psp->hdcp_context.mutex);
-	}
-
-	release_psp_cmd_buf(psp);
-
-	return ret;
+	return psp_ta_load(psp, &psp->hdcp_context.context);
 }
 
 static int psp_hdcp_initialize(struct psp_context *psp)
 {
 	int ret;
@@ -1623,12 +1514,15 @@ static int psp_hdcp_initialize(struct psp_context *psp)
 	if (amdgpu_sriov_vf(psp->adev))
 		return 0;
 
-	if (!psp->hdcp.size_bytes ||
-	    !psp->hdcp.start_addr) {
+	if (!psp->hdcp_context.context.bin_desc.size_bytes ||
+	    !psp->hdcp_context.context.bin_desc.start_addr) {
 		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
 		return 0;
 	}
 
+	psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
+	psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+
 	if (!psp->hdcp_context.context.initialized) {
 		ret = psp_hdcp_init_shared_buf(psp);
 		if (ret)
@@ -1636,32 +1530,17 @@ static int psp_hdcp_initialize(struct psp_context *psp)
 	}
 
 	ret = psp_hdcp_load(psp);
-	if (ret)
-		return ret;
+	if (!ret) {
+		psp->hdcp_context.context.initialized = true;
+		mutex_init(&psp->hdcp_context.mutex);
+	}
 
-	return 0;
+	return ret;
 }
 
 static int psp_hdcp_unload(struct psp_context *psp)
 {
-	int ret;
-	struct psp_gfx_cmd_resp *cmd;
-
-	/*
-	 * TODO: bypass the unloading in sriov for now
-	 */
-	if (amdgpu_sriov_vf(psp->adev))
-		return 0;
-
-	cmd = acquire_psp_cmd_buf(psp);
-
-	psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.context.session_id);
-
-	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
-	release_psp_cmd_buf(psp);
-
-	return ret;
+	return psp_ta_unload(psp, &psp->hdcp_context.context);
 }
 
 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
@@ -1709,42 +1588,12 @@ out:
 // DTM start
 static int psp_dtm_init_shared_buf(struct psp_context *psp)
 {
-	return psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context,
-				      PSP_DTM_SHARED_MEM_SIZE);
+	return psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
 }
 
 static int psp_dtm_load(struct psp_context *psp)
 {
-	int ret;
-	struct psp_gfx_cmd_resp *cmd;
-
-	/*
-	 * TODO: bypass the loading in sriov for now
-	 */
-	if (amdgpu_sriov_vf(psp->adev))
-		return 0;
-
-	psp_copy_fw(psp, psp->dtm.start_addr, psp->dtm.size_bytes);
-
-	cmd = acquire_psp_cmd_buf(psp);
-
-	psp_prep_ta_load_cmd_buf(cmd,
-				 psp->fw_pri_mc_addr,
-				 psp->dtm.size_bytes,
-				 psp->dtm_context.context.mem_context.shared_mc_addr,
-				 PSP_DTM_SHARED_MEM_SIZE);
-
-	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
-	if (!ret) {
-		psp->dtm_context.context.initialized = true;
-		psp->dtm_context.context.session_id = cmd->resp.session_id;
-		mutex_init(&psp->dtm_context.mutex);
-	}
-
-	release_psp_cmd_buf(psp);
-
-	return ret;
+	return psp_ta_load(psp, &psp->dtm_context.context);
 }
 
 static int psp_dtm_initialize(struct psp_context *psp)
@@ -1757,12 +1606,15 @@ static int psp_dtm_initialize(struct psp_context *psp)
 	if (amdgpu_sriov_vf(psp->adev))
 		return 0;
 
-	if (!psp->dtm.size_bytes ||
-	    !psp->dtm.start_addr) {
+	if (!psp->dtm_context.context.bin_desc.size_bytes ||
+	    !psp->dtm_context.context.bin_desc.start_addr) {
 		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
 		return 0;
 	}
 
+	psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
+	psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+
 	if (!psp->dtm_context.context.initialized) {
 		ret = psp_dtm_init_shared_buf(psp);
 		if (ret)
@ -1770,32 +1622,17 @@ static int psp_dtm_initialize(struct psp_context *psp)
} }
ret = psp_dtm_load(psp); ret = psp_dtm_load(psp);
if (ret) if (!ret) {
return ret; psp->dtm_context.context.initialized = true;
mutex_init(&psp->dtm_context.mutex);
}
return 0; return ret;
} }
static int psp_dtm_unload(struct psp_context *psp) static int psp_dtm_unload(struct psp_context *psp)
{ {
int ret; return psp_ta_unload(psp, &psp->dtm_context.context);
struct psp_gfx_cmd_resp *cmd;
/*
* TODO: bypass the unloading in sriov for now
*/
if (amdgpu_sriov_vf(psp->adev))
return 0;
cmd = acquire_psp_cmd_buf(psp);
psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.context.session_id);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
release_psp_cmd_buf(psp);
return ret;
} }
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
@ -1843,50 +1680,17 @@ out:
// RAP start // RAP start
static int psp_rap_init_shared_buf(struct psp_context *psp) static int psp_rap_init_shared_buf(struct psp_context *psp)
{ {
return psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context, return psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
PSP_RAP_SHARED_MEM_SIZE);
} }
static int psp_rap_load(struct psp_context *psp) static int psp_rap_load(struct psp_context *psp)
{ {
int ret; return psp_ta_load(psp, &psp->rap_context.context);
struct psp_gfx_cmd_resp *cmd;
psp_copy_fw(psp, psp->rap.start_addr, psp->rap.size_bytes);
cmd = acquire_psp_cmd_buf(psp);
psp_prep_ta_load_cmd_buf(cmd,
psp->fw_pri_mc_addr,
psp->rap.size_bytes,
psp->rap_context.context.mem_context.shared_mc_addr,
PSP_RAP_SHARED_MEM_SIZE);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
if (!ret) {
psp->rap_context.context.initialized = true;
psp->rap_context.context.session_id = cmd->resp.session_id;
mutex_init(&psp->rap_context.mutex);
}
release_psp_cmd_buf(psp);
return ret;
} }
static int psp_rap_unload(struct psp_context *psp) static int psp_rap_unload(struct psp_context *psp)
{ {
int ret; return psp_ta_unload(psp, &psp->rap_context.context);
struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
psp_prep_ta_unload_cmd_buf(cmd, psp->rap_context.context.session_id);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
release_psp_cmd_buf(psp);
return ret;
} }
static int psp_rap_initialize(struct psp_context *psp) static int psp_rap_initialize(struct psp_context *psp)
@ -1900,12 +1704,15 @@ static int psp_rap_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev)) if (amdgpu_sriov_vf(psp->adev))
return 0; return 0;
if (!psp->rap.size_bytes || if (!psp->rap_context.context.bin_desc.size_bytes ||
!psp->rap.start_addr) { !psp->rap_context.context.bin_desc.start_addr) {
dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n"); dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
return 0; return 0;
} }
psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
if (!psp->rap_context.context.initialized) { if (!psp->rap_context.context.initialized) {
ret = psp_rap_init_shared_buf(psp); ret = psp_rap_init_shared_buf(psp);
if (ret) if (ret)
@ -1913,16 +1720,15 @@ static int psp_rap_initialize(struct psp_context *psp)
} }
ret = psp_rap_load(psp); ret = psp_rap_load(psp);
if (ret) if (!ret) {
psp->rap_context.context.initialized = true;
mutex_init(&psp->rap_context.mutex);
} else
return ret; return ret;
ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status); ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
if (ret || status != TA_RAP_STATUS__SUCCESS) { if (ret || status != TA_RAP_STATUS__SUCCESS) {
psp_rap_unload(psp); psp_rap_terminate(psp);
psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
psp->rap_context.context.initialized = false;
dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n", dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
ret, status); ret, status);
@ -1989,49 +1795,17 @@ out_unlock:
static int psp_securedisplay_init_shared_buf(struct psp_context *psp) static int psp_securedisplay_init_shared_buf(struct psp_context *psp)
{ {
return psp_ta_init_shared_buf( return psp_ta_init_shared_buf(
psp, &psp->securedisplay_context.context.mem_context, psp, &psp->securedisplay_context.context.mem_context);
PSP_SECUREDISPLAY_SHARED_MEM_SIZE);
} }
static int psp_securedisplay_load(struct psp_context *psp) static int psp_securedisplay_load(struct psp_context *psp)
{ {
int ret; return psp_ta_load(psp, &psp->securedisplay_context.context);
struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
memcpy(psp->fw_pri_buf, psp->securedisplay.start_addr, psp->securedisplay.size_bytes);
psp_prep_ta_load_cmd_buf(cmd,
psp->fw_pri_mc_addr,
psp->securedisplay.size_bytes,
psp->securedisplay_context.context.mem_context.shared_mc_addr,
PSP_SECUREDISPLAY_SHARED_MEM_SIZE);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
if (!ret) {
psp->securedisplay_context.context.initialized = true;
psp->securedisplay_context.context.session_id = cmd->resp.session_id;
mutex_init(&psp->securedisplay_context.mutex);
}
release_psp_cmd_buf(psp);
return ret;
} }
static int psp_securedisplay_unload(struct psp_context *psp) static int psp_securedisplay_unload(struct psp_context *psp)
{ {
int ret; return psp_ta_unload(psp, &psp->securedisplay_context.context);
struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
psp_prep_ta_unload_cmd_buf(cmd, psp->securedisplay_context.context.session_id);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
release_psp_cmd_buf(psp);
return ret;
} }
static int psp_securedisplay_initialize(struct psp_context *psp) static int psp_securedisplay_initialize(struct psp_context *psp)
@ -2045,12 +1819,16 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev)) if (amdgpu_sriov_vf(psp->adev))
return 0; return 0;
if (!psp->securedisplay.size_bytes || if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
!psp->securedisplay.start_addr) { !psp->securedisplay_context.context.bin_desc.start_addr) {
dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n"); dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
return 0; return 0;
} }
psp->securedisplay_context.context.mem_context.shared_mem_size =
PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
if (!psp->securedisplay_context.context.initialized) { if (!psp->securedisplay_context.context.initialized) {
ret = psp_securedisplay_init_shared_buf(psp); ret = psp_securedisplay_init_shared_buf(psp);
if (ret) if (ret)
@ -2058,7 +1836,10 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
} }
ret = psp_securedisplay_load(psp); ret = psp_securedisplay_load(psp);
if (ret) if (!ret) {
psp->securedisplay_context.context.initialized = true;
mutex_init(&psp->securedisplay_context.mutex);
} else
return ret; return ret;
psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
@ -2066,12 +1847,7 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA); ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
if (ret) { if (ret) {
psp_securedisplay_unload(psp); psp_securedisplay_terminate(psp);
psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
psp->securedisplay_context.context.initialized = false;
dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n"); dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
return -EINVAL; return -EINVAL;
} }
@ -2629,7 +2405,7 @@ skip_memalloc:
if (ret) if (ret)
goto failed; goto failed;
ret = psp_asd_load(psp); ret = psp_asd_initialize(psp);
if (ret) { if (ret) {
DRM_ERROR("PSP load asd failed!\n"); DRM_ERROR("PSP load asd failed!\n");
return ret; return ret;
@ -2721,7 +2497,7 @@ static int psp_hw_fini(void *handle)
psp_hdcp_terminate(psp); psp_hdcp_terminate(psp);
} }
psp_asd_unload(psp); psp_asd_terminate(psp);
psp_tmr_terminate(psp); psp_tmr_terminate(psp);
psp_ring_destroy(psp, PSP_RING_TYPE__KM); psp_ring_destroy(psp, PSP_RING_TYPE__KM);
@ -2779,9 +2555,9 @@ static int psp_suspend(void *handle)
} }
} }
ret = psp_asd_unload(psp); ret = psp_asd_terminate(psp);
if (ret) { if (ret) {
DRM_ERROR("Failed to unload asd\n"); DRM_ERROR("Failed to terminate asd\n");
return ret; return ret;
} }
@ -2826,7 +2602,7 @@ static int psp_resume(void *handle)
if (ret) if (ret)
goto failed; goto failed;
ret = psp_asd_load(psp); ret = psp_asd_initialize(psp);
if (ret) { if (ret) {
DRM_ERROR("PSP load asd failed!\n"); DRM_ERROR("PSP load asd failed!\n");
goto failed; goto failed;
@ -2994,10 +2770,10 @@ int psp_init_asd_microcode(struct psp_context *psp,
goto out; goto out;
asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
adev->psp.asd.fw_version = le32_to_cpu(asd_hdr->header.ucode_version); adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
adev->psp.asd.feature_version = le32_to_cpu(asd_hdr->sos.fw_version); adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
adev->psp.asd.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes); adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
adev->psp.asd.start_addr = (uint8_t *)asd_hdr + adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
return 0; return 0;
out: out:
@ -3284,40 +3060,43 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
switch (desc->fw_type) { switch (desc->fw_type) {
case TA_FW_TYPE_PSP_ASD: case TA_FW_TYPE_PSP_ASD:
psp->asd.fw_version = le32_to_cpu(desc->fw_version); psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
psp->asd.feature_version = le32_to_cpu(desc->fw_version); psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
psp->asd.size_bytes = le32_to_cpu(desc->size_bytes); psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
psp->asd.start_addr = ucode_start_addr; psp->asd_context.bin_desc.start_addr = ucode_start_addr;
break; break;
case TA_FW_TYPE_PSP_XGMI: case TA_FW_TYPE_PSP_XGMI:
psp->xgmi.feature_version = le32_to_cpu(desc->fw_version); psp->xgmi_context.context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
psp->xgmi.size_bytes = le32_to_cpu(desc->size_bytes); psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
psp->xgmi.start_addr = ucode_start_addr; psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr;
break; break;
case TA_FW_TYPE_PSP_RAS: case TA_FW_TYPE_PSP_RAS:
psp->ras.feature_version = le32_to_cpu(desc->fw_version); psp->ras_context.context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
psp->ras.size_bytes = le32_to_cpu(desc->size_bytes); psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
psp->ras.start_addr = ucode_start_addr; psp->ras_context.context.bin_desc.start_addr = ucode_start_addr;
break; break;
case TA_FW_TYPE_PSP_HDCP: case TA_FW_TYPE_PSP_HDCP:
psp->hdcp.feature_version = le32_to_cpu(desc->fw_version); psp->hdcp_context.context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
psp->hdcp.size_bytes = le32_to_cpu(desc->size_bytes); psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
psp->hdcp.start_addr = ucode_start_addr; psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr;
break; break;
case TA_FW_TYPE_PSP_DTM: case TA_FW_TYPE_PSP_DTM:
psp->dtm.feature_version = le32_to_cpu(desc->fw_version); psp->dtm_context.context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
psp->dtm.size_bytes = le32_to_cpu(desc->size_bytes); psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
psp->dtm.start_addr = ucode_start_addr; psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr;
break; break;
case TA_FW_TYPE_PSP_RAP: case TA_FW_TYPE_PSP_RAP:
psp->rap.feature_version = le32_to_cpu(desc->fw_version); psp->rap_context.context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
psp->rap.size_bytes = le32_to_cpu(desc->size_bytes); psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
psp->rap.start_addr = ucode_start_addr; psp->rap_context.context.bin_desc.start_addr = ucode_start_addr;
break; break;
case TA_FW_TYPE_PSP_SECUREDISPLAY: case TA_FW_TYPE_PSP_SECUREDISPLAY:
psp->securedisplay.feature_version = le32_to_cpu(desc->fw_version); psp->securedisplay_context.context.bin_desc.feature_version =
psp->securedisplay.size_bytes = le32_to_cpu(desc->size_bytes); le32_to_cpu(desc->fw_version);
psp->securedisplay.start_addr = ucode_start_addr; psp->securedisplay_context.context.bin_desc.size_bytes =
le32_to_cpu(desc->size_bytes);
psp->securedisplay_context.context.bin_desc.start_addr =
ucode_start_addr;
break; break;
default: default:
dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type); dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
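
All of the per-TA load/unload wrappers above now collapse into the shared psp_ta_load()/psp_ta_unload() helpers, driven entirely by what each initialize path stores in its struct ta_context: the firmware image in bin_desc, the shared-buffer size in mem_context.shared_mem_size, and the load command in ta_load_type. The helper bodies are not part of these hunks; the following is only a sketch of what the unified load path plausibly does, reconstructed from the per-TA bodies deleted above (the name psp_ta_load_sketch and the exact prototype are made up):

    /* Sketch only: inferred from the deleted per-TA load bodies. */
    static int psp_ta_load_sketch(struct psp_context *psp,
                                  struct ta_context *context)
    {
        struct psp_gfx_cmd_resp *cmd;
        int ret;

        /* stage the TA binary in the PSP private firmware buffer */
        psp_copy_fw(psp, context->bin_desc.start_addr,
                    context->bin_desc.size_bytes);

        cmd = acquire_psp_cmd_buf(psp);
        psp_prep_ta_load_cmd_buf(cmd,
                                 psp->fw_pri_mc_addr,
                                 context->bin_desc.size_bytes,
                                 context->mem_context.shared_mc_addr,
                                 context->mem_context.shared_mem_size);

        ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
        if (!ret)
            context->session_id = cmd->resp.session_id;

        release_psp_cmd_buf(psp);
        return ret;
    }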


@@ -34,17 +34,20 @@

 #define PSP_FENCE_BUFFER_SIZE   0x1000
 #define PSP_CMD_BUFFER_SIZE     0x1000
-#define PSP_XGMI_SHARED_MEM_SIZE 0x4000
-#define PSP_RAS_SHARED_MEM_SIZE 0x4000
 #define PSP_1_MEG               0x100000
 #define PSP_TMR_SIZE(adev)      ((adev)->asic_type == CHIP_ALDEBARAN ? 0x800000 : 0x400000)
-#define PSP_HDCP_SHARED_MEM_SIZE 0x4000
-#define PSP_DTM_SHARED_MEM_SIZE 0x4000
-#define PSP_RAP_SHARED_MEM_SIZE 0x4000
-#define PSP_SECUREDISPLAY_SHARED_MEM_SIZE 0x4000
-#define PSP_SHARED_MEM_SIZE     0x4000
 #define PSP_FW_NAME_LEN         0x24

+enum psp_shared_mem_size {
+    PSP_ASD_SHARED_MEM_SIZE           = 0x0,
+    PSP_XGMI_SHARED_MEM_SIZE          = 0x4000,
+    PSP_RAS_SHARED_MEM_SIZE           = 0x4000,
+    PSP_HDCP_SHARED_MEM_SIZE          = 0x4000,
+    PSP_DTM_SHARED_MEM_SIZE           = 0x4000,
+    PSP_RAP_SHARED_MEM_SIZE           = 0x4000,
+    PSP_SECUREDISPLAY_SHARED_MEM_SIZE = 0x4000,
+};
+
 struct psp_context;
 struct psp_xgmi_node_info;
 struct psp_xgmi_topology_info;
@@ -131,21 +134,26 @@ struct psp_xgmi_topology_info {
     struct psp_xgmi_node_info   nodes[AMDGPU_XGMI_MAX_CONNECTED_NODES];
 };

-struct psp_asd_context {
-    bool        asd_initialized;
-    uint32_t    session_id;
+struct psp_bin_desc {
+    uint32_t fw_version;
+    uint32_t feature_version;
+    uint32_t size_bytes;
+    uint8_t *start_addr;
 };

 struct ta_mem_context {
     struct amdgpu_bo            *shared_bo;
     uint64_t                    shared_mc_addr;
     void                        *shared_buf;
+    enum psp_shared_mem_size    shared_mem_size;
 };

 struct ta_context {
     bool                    initialized;
     uint32_t                session_id;
     struct ta_mem_context   mem_context;
+    struct psp_bin_desc     bin_desc;
+    enum psp_gfx_cmd_id     ta_load_type;
 };

 struct ta_cp_context {
@@ -263,13 +271,6 @@ struct psp_runtime_boot_cfg_entry {
     uint32_t reserved;
 };

-struct psp_bin_desc {
-    uint32_t fw_version;
-    uint32_t feature_version;
-    uint32_t size_bytes;
-    uint8_t *start_addr;
-};
-
 struct psp_context
 {
     struct amdgpu_device        *adev;
@@ -301,7 +302,6 @@ struct psp_context

     /* asd firmware */
     const struct firmware       *asd_fw;
-    struct psp_bin_desc         asd;

     /* toc firmware */
     const struct firmware       *toc_fw;
@@ -326,14 +326,8 @@ struct psp_context
     /* xgmi ta firmware and buffer */
     const struct firmware       *ta_fw;
     uint32_t                    ta_fw_version;
-    struct psp_bin_desc         xgmi;
-    struct psp_bin_desc         ras;
-    struct psp_bin_desc         hdcp;
-    struct psp_bin_desc         dtm;
-    struct psp_bin_desc         rap;
-    struct psp_bin_desc         securedisplay;

-    struct psp_asd_context      asd_context;
+    struct ta_context           asd_context;
     struct psp_xgmi_context     xgmi_context;
     struct psp_ras_context      ras_context;
     struct ta_cp_context        hdcp_context;
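
With struct psp_bin_desc folded into struct ta_context, per-TA firmware metadata is now reached through the owning context rather than through dedicated psp_context members. Two representative reads under the new layout (hdcp_context is a ta_cp_context that embeds a ta_context, while asd_context is now a bare ta_context):

    uint32_t hdcp_ver = psp->hdcp_context.context.bin_desc.fw_version; /* was psp->hdcp.fw_version */
    uint32_t asd_ver = psp->asd_context.bin_desc.fw_version;           /* was psp->asd.fw_version */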


@@ -61,8 +61,30 @@ const char *ras_block_string[] = {
     "mp0",
     "mp1",
     "fuse",
+    "mca",
 };

+const char *ras_mca_block_string[] = {
+    "mca_mp0",
+    "mca_mp1",
+    "mca_mpio",
+    "mca_iohc",
+};
+
+const char *get_ras_block_str(struct ras_common_if *ras_block)
+{
+    if (!ras_block)
+        return "NULL";
+
+    if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT)
+        return "OUT OF RANGE";
+
+    if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
+        return ras_mca_block_string[ras_block->sub_block_index];
+
+    return ras_block_string[ras_block->block];
+}
+
 #define ras_err_str(i) (ras_error_string[ffs(i)])

 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
@@ -187,7 +209,7 @@ static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
     for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
         *block_id = i;
-        if (strcmp(name, ras_block_str(i)) == 0)
+        if (strcmp(name, ras_block_string[i]) == 0)
             return 0;
     }
     return -EINVAL;
@@ -509,7 +531,6 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
     if (amdgpu_ras_query_error_status(obj->adev, &info))
         return -EINVAL;

-
     if (obj->adev->asic_type == CHIP_ALDEBARAN) {
         if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
             DRM_WARN("Failed to reset error counter and error status");
@@ -529,7 +550,7 @@ static inline void put_obj(struct ras_manager *obj)
     if (obj && (--obj->use == 0))
         list_del(&obj->node);
     if (obj && (obj->use < 0))
-        DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", ras_block_str(obj->head.block));
+        DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", get_ras_block_str(&obj->head));
 }

 /* make one obj and return it. */
@@ -545,7 +566,14 @@ static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
     if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
         return NULL;

-    obj = &con->objs[head->block];
+    if (head->block == AMDGPU_RAS_BLOCK__MCA) {
+        if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
+            return NULL;
+
+        obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
+    } else
+        obj = &con->objs[head->block];
+
     /* already exist. return obj? */
     if (alive_obj(obj))
         return NULL;
@@ -573,19 +601,21 @@ struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
         if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
             return NULL;

-        obj = &con->objs[head->block];
+        if (head->block == AMDGPU_RAS_BLOCK__MCA) {
+            if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
+                return NULL;

-        if (alive_obj(obj)) {
-            WARN_ON(head->block != obj->head.block);
+            obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
+        } else
+            obj = &con->objs[head->block];
+
+        if (alive_obj(obj))
             return obj;
-        }
     } else {
-        for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
+        for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
             obj = &con->objs[i];
-            if (alive_obj(obj)) {
-                WARN_ON(i != obj->head.block);
+            if (alive_obj(obj))
                 return obj;
-            }
         }
     }
@@ -626,8 +656,6 @@ static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
      */
     if (!amdgpu_ras_is_feature_allowed(adev, head))
         return 0;
-    if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
-        return 0;

     if (enable) {
         if (!obj) {
@@ -678,18 +706,13 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
     /* Do not enable if it is not allowed. */
     WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));

-    /* Are we alerady in that state we are going to set? */
-    if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) {
-        ret = 0;
-        goto out;
-    }
-
     if (!amdgpu_ras_intr_triggered()) {
         ret = psp_ras_enable_features(&adev->psp, info, enable);
         if (ret) {
             dev_err(adev->dev, "ras %s %s failed %d\n",
                 enable ? "enable":"disable",
-                ras_block_str(head->block),
+                get_ras_block_str(head),
                 ret);
             goto out;
         }
@@ -731,7 +754,7 @@ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
             if (!ret)
                 dev_info(adev->dev,
                     "RAS INFO: %s setup object\n",
-                    ras_block_str(head->block));
+                    get_ras_block_str(head));
         }
     } else {
         /* setup the object then issue a ras TA disable cmd.*/
@@ -781,17 +804,39 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
         bool bypass)
 {
     struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
-    int ras_block_count = AMDGPU_RAS_BLOCK_COUNT;
     int i;
-    const enum amdgpu_ras_error_type default_ras_type =
-        AMDGPU_RAS_ERROR__NONE;
+    const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;

-    for (i = 0; i < ras_block_count; i++) {
+    for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
         struct ras_common_if head = {
             .block = i,
             .type = default_ras_type,
             .sub_block_index = 0,
         };
+
+        if (i == AMDGPU_RAS_BLOCK__MCA)
+            continue;
+
+        if (bypass) {
+            /*
+             * bypass psp. vbios enable ras for us.
+             * so just create the obj
+             */
+            if (__amdgpu_ras_feature_enable(adev, &head, 1))
+                break;
+        } else {
+            if (amdgpu_ras_feature_enable(adev, &head, 1))
+                break;
+        }
+    }
+
+    for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
+        struct ras_common_if head = {
+            .block = AMDGPU_RAS_BLOCK__MCA,
+            .type = default_ras_type,
+            .sub_block_index = i,
+        };
+
         if (bypass) {
             /*
              * bypass psp. vbios enable ras for us.
@@ -809,6 +854,32 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
 }
 /* feature ctl end */

+void amdgpu_ras_mca_query_error_status(struct amdgpu_device *adev,
+                                       struct ras_common_if *ras_block,
+                                       struct ras_err_data  *err_data)
+{
+    switch (ras_block->sub_block_index) {
+    case AMDGPU_RAS_MCA_BLOCK__MP0:
+        if (adev->mca.mp0.ras_funcs &&
+            adev->mca.mp0.ras_funcs->query_ras_error_count)
+            adev->mca.mp0.ras_funcs->query_ras_error_count(adev, &err_data);
+        break;
+    case AMDGPU_RAS_MCA_BLOCK__MP1:
+        if (adev->mca.mp1.ras_funcs &&
+            adev->mca.mp1.ras_funcs->query_ras_error_count)
+            adev->mca.mp1.ras_funcs->query_ras_error_count(adev, &err_data);
+        break;
+    case AMDGPU_RAS_MCA_BLOCK__MPIO:
+        if (adev->mca.mpio.ras_funcs &&
+            adev->mca.mpio.ras_funcs->query_ras_error_count)
+            adev->mca.mpio.ras_funcs->query_ras_error_count(adev, &err_data);
+        break;
+    default:
+        break;
+    }
+}
+
 /* query/inject/cure begin */
 int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
                                   struct ras_query_if *info)
@@ -872,6 +943,9 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
             adev->hdp.ras_funcs->query_ras_error_count)
             adev->hdp.ras_funcs->query_ras_error_count(adev, &err_data);
         break;
+    case AMDGPU_RAS_BLOCK__MCA:
+        amdgpu_ras_mca_query_error_status(adev, &info->head, &err_data);
+        break;
     default:
         break;
     }
@@ -893,13 +967,13 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
                 adev->smuio.funcs->get_socket_id(adev),
                 adev->smuio.funcs->get_die_id(adev),
                 obj->err_data.ce_count,
-                ras_block_str(info->head.block));
+                get_ras_block_str(&info->head));
         } else {
             dev_info(adev->dev, "%ld correctable hardware errors "
                     "detected in %s block, no user "
                     "action is needed.\n",
                     obj->err_data.ce_count,
-                    ras_block_str(info->head.block));
+                    get_ras_block_str(&info->head));
         }
     }
     if (err_data.ue_count) {
@@ -912,12 +986,12 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
                 adev->smuio.funcs->get_socket_id(adev),
                 adev->smuio.funcs->get_die_id(adev),
                 obj->err_data.ue_count,
-                ras_block_str(info->head.block));
+                get_ras_block_str(&info->head));
         } else {
             dev_info(adev->dev, "%ld uncorrectable hardware errors "
                     "detected in %s block\n",
                     obj->err_data.ue_count,
-                    ras_block_str(info->head.block));
+                    get_ras_block_str(&info->head));
         }
     }
@@ -1027,6 +1101,7 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
     case AMDGPU_RAS_BLOCK__SDMA:
     case AMDGPU_RAS_BLOCK__MMHUB:
     case AMDGPU_RAS_BLOCK__PCIE_BIF:
+    case AMDGPU_RAS_BLOCK__MCA:
         ret = psp_ras_trigger_error(&adev->psp, &block_info);
         break;
     case AMDGPU_RAS_BLOCK__XGMI_WAFL:
@@ -1034,13 +1109,13 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
         break;
     default:
         dev_info(adev->dev, "%s error injection is not supported yet\n",
-             ras_block_str(info->head.block));
+             get_ras_block_str(&info->head));
         ret = -EINVAL;
     }

     if (ret)
         dev_err(adev->dev, "ras inject %s failed %d\n",
-            ras_block_str(info->head.block), ret);
+            get_ras_block_str(&info->head), ret);

     return ret;
 }
@@ -1383,7 +1458,7 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
         if (amdgpu_ras_is_supported(adev, obj->head.block) &&
             (obj->attr_inuse == 1)) {
             sprintf(fs_info.debugfs_name, "%s_err_inject",
-                ras_block_str(obj->head.block));
+                get_ras_block_str(&obj->head));
             fs_info.head = obj->head;
             amdgpu_ras_debugfs_create(adev, &fs_info, dir);
         }
@@ -2056,19 +2131,6 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
 }
 /* recovery end */

-/* return 0 if ras will reset gpu and repost.*/
-int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
-        unsigned int block)
-{
-    struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
-
-    if (!ras)
-        return -EINVAL;
-
-    ras->flags |= AMDGPU_RAS_FLAG_INIT_NEED_RESET;
-    return 0;
-}
-
 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
 {
     return adev->asic_type == CHIP_VEGA10 ||
@@ -2181,7 +2243,8 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
         return 0;

     con = kmalloc(sizeof(struct amdgpu_ras) +
-            sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT,
+            sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
+            sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
             GFP_KERNEL|__GFP_ZERO);
     if (!con)
         return -ENOMEM;
@@ -2306,12 +2369,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
     r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
     if (r) {
-        if (r == -EAGAIN) {
-            /* request gpu reset. will run again */
-            amdgpu_ras_request_reset_on_boot(adev,
-                    ras_block->block);
-            return 0;
-        } else if (adev->in_suspend || amdgpu_in_reset(adev)) {
+        if (adev->in_suspend || amdgpu_in_reset(adev)) {
             /* in resume phase, if fail to enable ras,
              * clean up all ras fs nodes, and disable ras */
             goto cleanup;
@@ -2403,19 +2461,6 @@ void amdgpu_ras_resume(struct amdgpu_device *adev)
         }
     }
-
-    if (con->flags & AMDGPU_RAS_FLAG_INIT_NEED_RESET) {
-        con->flags &= ~AMDGPU_RAS_FLAG_INIT_NEED_RESET;
-        /* setup ras obj state as disabled.
-         * for init_by_vbios case.
-         * if we want to enable ras, just enable it in a normal way.
-         * If we want do disable it, need setup ras obj as enabled,
-         * then issue another TA disable cmd.
-         * See feature_enable_on_boot
-         */
-        amdgpu_ras_disable_all_features(adev, 1);
-        amdgpu_ras_reset_gpu(adev);
-    }
 }

 void amdgpu_ras_suspend(struct amdgpu_device *adev)
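
The ras_manager array is now allocated with AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT entries: the regular blocks occupy the first AMDGPU_RAS_BLOCK__LAST slots and the MCA sub-blocks are appended after them. An illustrative helper (not in the patch) expressing the index math that amdgpu_ras_create_obj() and amdgpu_ras_find_obj() both open-code above:

    /* Illustrative only: mirrors the objs[] lookup logic above. */
    static inline int ras_obj_index(const struct ras_common_if *head)
    {
        if (head->block == AMDGPU_RAS_BLOCK__MCA)
            return AMDGPU_RAS_BLOCK__LAST + head->sub_block_index;
        return head->block;
    }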


@@ -32,7 +32,6 @@
 #include "amdgpu_ras_eeprom.h"

 #define AMDGPU_RAS_FLAG_INIT_BY_VBIOS       (0x1 << 0)
-#define AMDGPU_RAS_FLAG_INIT_NEED_RESET     (0x1 << 1)

 enum amdgpu_ras_block {
     AMDGPU_RAS_BLOCK__UMC = 0,
@@ -49,15 +48,22 @@ enum amdgpu_ras_block {
     AMDGPU_RAS_BLOCK__MP0,
     AMDGPU_RAS_BLOCK__MP1,
     AMDGPU_RAS_BLOCK__FUSE,
-    AMDGPU_RAS_BLOCK__MPIO,
+    AMDGPU_RAS_BLOCK__MCA,

     AMDGPU_RAS_BLOCK__LAST
 };

-extern const char *ras_block_string[];
+enum amdgpu_ras_mca_block {
+    AMDGPU_RAS_MCA_BLOCK__MP0 = 0,
+    AMDGPU_RAS_MCA_BLOCK__MP1,
+    AMDGPU_RAS_MCA_BLOCK__MPIO,
+    AMDGPU_RAS_MCA_BLOCK__IOHC,

-#define ras_block_str(i) (ras_block_string[i])
+    AMDGPU_RAS_MCA_BLOCK__LAST
+};

 #define AMDGPU_RAS_BLOCK_COUNT      AMDGPU_RAS_BLOCK__LAST
+#define AMDGPU_RAS_MCA_BLOCK_COUNT  AMDGPU_RAS_MCA_BLOCK__LAST
 #define AMDGPU_RAS_BLOCK_MASK       ((1ULL << AMDGPU_RAS_BLOCK_COUNT) - 1)

 enum amdgpu_ras_gfx_subblock {
@@ -488,8 +494,6 @@ static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
 }

 int amdgpu_ras_recovery_init(struct amdgpu_device *adev);
-int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
-        unsigned int block);

 void amdgpu_ras_resume(struct amdgpu_device *adev);
 void amdgpu_ras_suspend(struct amdgpu_device *adev);
@@ -544,6 +548,8 @@ amdgpu_ras_block_to_ta(enum amdgpu_ras_block block) {
         return TA_RAS_BLOCK__MP1;
     case AMDGPU_RAS_BLOCK__FUSE:
         return TA_RAS_BLOCK__FUSE;
+    case AMDGPU_RAS_BLOCK__MCA:
+        return TA_RAS_BLOCK__MCA;
     default:
         WARN_ONCE(1, "RAS ERROR: unexpected block id %d\n", block);
         return TA_RAS_BLOCK__UMC;
@@ -638,4 +644,6 @@ void amdgpu_release_ras_context(struct amdgpu_device *adev);

 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev);

+const char *get_ras_block_str(struct ras_common_if *ras_block);
+
 #endif
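
Given the enums above and the two string tables in amdgpu_ras.c, name lookup now resolves MCA entries by sub-block. A worked example (values taken from the enums above):

    struct ras_common_if head = {
        .block = AMDGPU_RAS_BLOCK__MCA,
        .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MPIO,
    };
    /* get_ras_block_str(&head) returns "mca_mpio"; for any non-MCA
     * block it returns the plain ras_block_string[] entry instead. */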


@@ -415,26 +415,20 @@ static const struct file_operations amdgpu_debugfs_ring_fops = {
 #endif

-int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
-                             struct amdgpu_ring *ring)
+void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
+                              struct amdgpu_ring *ring)
 {
 #if defined(CONFIG_DEBUG_FS)
     struct drm_minor *minor = adev_to_drm(adev)->primary;
-    struct dentry *ent, *root = minor->debugfs_root;
+    struct dentry *root = minor->debugfs_root;
     char name[32];

     sprintf(name, "amdgpu_ring_%s", ring->name);
+    debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, ring,
+                             &amdgpu_debugfs_ring_fops,
+                             ring->ring_size + 12);

-    ent = debugfs_create_file(name,
-                              S_IFREG | S_IRUGO, root,
-                              ring, &amdgpu_debugfs_ring_fops);
-    if (IS_ERR(ent))
-        return PTR_ERR(ent);
-
-    i_size_write(ent->d_inode, ring->ring_size + 12);
-    ring->ent = ent;
 #endif
-    return 0;
 }

 /**
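
debugfs_create_file_size() creates the node and sets its inode size in a single call and, like the rest of the current debugfs API, returns nothing: debugfs creation failures are deliberately ignored, which is why the init function can lose its int return and the dentry bookkeeping. The call shape, repeated for reference with a hypothetical ring name:

    debugfs_create_file_size("amdgpu_ring_gfx", S_IFREG | S_IRUGO, root,
                             ring, &amdgpu_debugfs_ring_fops,
                             ring->ring_size + 12);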


@@ -36,8 +36,13 @@
 #define AMDGPU_MAX_VCE_RINGS        3
 #define AMDGPU_MAX_UVD_ENC_RINGS    2

-#define AMDGPU_RING_PRIO_DEFAULT    1
-#define AMDGPU_RING_PRIO_MAX        AMDGPU_GFX_PIPE_PRIO_MAX
+enum amdgpu_ring_priority_level {
+    AMDGPU_RING_PRIO_0,
+    AMDGPU_RING_PRIO_1,
+    AMDGPU_RING_PRIO_DEFAULT = 1,
+    AMDGPU_RING_PRIO_2,
+    AMDGPU_RING_PRIO_MAX
+};

 /* some special values for the owner field */
 #define AMDGPU_FENCE_OWNER_UNDEFINED    ((void *)0ul)
@@ -248,10 +253,6 @@ struct amdgpu_ring {
     bool        has_compute_vm_bug;
     bool        no_scheduler;
     int         hw_prio;
-
-#if defined(CONFIG_DEBUG_FS)
-    struct dentry *ent;
-#endif
 };

 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
@@ -351,8 +352,6 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,

 int amdgpu_ring_test_helper(struct amdgpu_ring *ring);

-int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
-                             struct amdgpu_ring *ring);
-void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
+void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
+                              struct amdgpu_ring *ring);

 #endif
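
Note the deliberate aliasing in the new enum: level 1 is spelled both AMDGPU_RING_PRIO_1 and AMDGPU_RING_PRIO_DEFAULT, so existing users of the default keep their value. A compile-time check of the resulting values (illustrative, not part of the patch):

    static_assert(AMDGPU_RING_PRIO_DEFAULT == AMDGPU_RING_PRIO_1);
    static_assert(AMDGPU_RING_PRIO_MAX == 3); /* PRIO_0, PRIO_1, PRIO_2, then MAX */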


@@ -32,37 +32,9 @@
 #include "amdgpu_sched.h"
 #include "amdgpu_vm.h"

-int amdgpu_to_sched_priority(int amdgpu_priority,
-                             enum drm_sched_priority *prio)
-{
-    switch (amdgpu_priority) {
-    case AMDGPU_CTX_PRIORITY_VERY_HIGH:
-        *prio = DRM_SCHED_PRIORITY_HIGH;
-        break;
-    case AMDGPU_CTX_PRIORITY_HIGH:
-        *prio = DRM_SCHED_PRIORITY_HIGH;
-        break;
-    case AMDGPU_CTX_PRIORITY_NORMAL:
-        *prio = DRM_SCHED_PRIORITY_NORMAL;
-        break;
-    case AMDGPU_CTX_PRIORITY_LOW:
-    case AMDGPU_CTX_PRIORITY_VERY_LOW:
-        *prio = DRM_SCHED_PRIORITY_MIN;
-        break;
-    case AMDGPU_CTX_PRIORITY_UNSET:
-        *prio = DRM_SCHED_PRIORITY_UNSET;
-        break;
-    default:
-        WARN(1, "Invalid context priority %d\n", amdgpu_priority);
-        return -EINVAL;
-    }
-
-    return 0;
-}
-
 static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
                                                   int fd,
-                                                  enum drm_sched_priority priority)
+                                                  int32_t priority)
 {
     struct fd f = fdget(fd);
     struct amdgpu_fpriv *fpriv;
@@ -89,7 +61,7 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
 static int amdgpu_sched_context_priority_override(struct amdgpu_device *adev,
                                                   int fd,
                                                   unsigned ctx_id,
-                                                  enum drm_sched_priority priority)
+                                                  int32_t priority)
 {
     struct fd f = fdget(fd);
     struct amdgpu_fpriv *fpriv;
@@ -124,7 +96,6 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
 {
     union drm_amdgpu_sched *args = data;
     struct amdgpu_device *adev = drm_to_adev(dev);
-    enum drm_sched_priority priority;
     int r;

     /* First check the op, then the op's argument.
@@ -138,21 +109,22 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
         return -EINVAL;
     }

-    r = amdgpu_to_sched_priority(args->in.priority, &priority);
-    if (r)
-        return r;
+    if (!amdgpu_ctx_priority_is_valid(args->in.priority)) {
+        WARN(1, "Invalid context priority %d\n", args->in.priority);
+        return -EINVAL;
+    }

     switch (args->in.op) {
     case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE:
         r = amdgpu_sched_process_priority_override(adev,
                                                    args->in.fd,
-                                                   priority);
+                                                   args->in.priority);
         break;
     case AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE:
         r = amdgpu_sched_context_priority_override(adev,
                                                    args->in.fd,
                                                    args->in.ctx_id,
-                                                   priority);
+                                                   args->in.priority);
         break;
     default:
         /* Impossible.
View File

@ -525,9 +525,9 @@ FW_VERSION_ATTR(rlc_srls_fw_version, 0444, gfx.rlc_srls_fw_version);
FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version); FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version);
FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version); FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version);
FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos.fw_version); FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos.fw_version);
FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd.fw_version); FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_context.bin_desc.fw_version);
FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ras.feature_version); FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ras_context.context.bin_desc.feature_version);
FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.xgmi.feature_version); FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.xgmi_context.context.bin_desc.feature_version);
FW_VERSION_ATTR(smc_fw_version, 0444, pm.fw_version); FW_VERSION_ATTR(smc_fw_version, 0444, pm.fw_version);
FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version); FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version); FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
@ -572,6 +572,7 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
const struct dmcu_firmware_header_v1_0 *dmcu_hdr = NULL; const struct dmcu_firmware_header_v1_0 *dmcu_hdr = NULL;
const struct dmcub_firmware_header_v1_0 *dmcub_hdr = NULL; const struct dmcub_firmware_header_v1_0 *dmcub_hdr = NULL;
const struct mes_firmware_header_v1_0 *mes_hdr = NULL; const struct mes_firmware_header_v1_0 *mes_hdr = NULL;
u8 *ucode_addr;
if (NULL == ucode->fw) if (NULL == ucode->fw)
return 0; return 0;
@ -588,94 +589,83 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
dmcub_hdr = (const struct dmcub_firmware_header_v1_0 *)ucode->fw->data; dmcub_hdr = (const struct dmcub_firmware_header_v1_0 *)ucode->fw->data;
mes_hdr = (const struct mes_firmware_header_v1_0 *)ucode->fw->data; mes_hdr = (const struct mes_firmware_header_v1_0 *)ucode->fw->data;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP || if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
(ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1 && switch (ucode->ucode_id) {
ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2 && case AMDGPU_UCODE_ID_CP_MEC1:
ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1_JT && case AMDGPU_UCODE_ID_CP_MEC2:
ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2_JT && ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
ucode->ucode_id != AMDGPU_UCODE_ID_CP_MES && le32_to_cpu(cp_hdr->jt_size) * 4;
ucode->ucode_id != AMDGPU_UCODE_ID_CP_MES_DATA && ucode_addr = (u8 *)ucode->fw->data +
ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL && le32_to_cpu(header->ucode_array_offset_bytes);
ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM && break;
ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM && case AMDGPU_UCODE_ID_CP_MEC1_JT:
ucode->ucode_id != AMDGPU_UCODE_ID_RLC_IRAM && case AMDGPU_UCODE_ID_CP_MEC2_JT:
ucode->ucode_id != AMDGPU_UCODE_ID_RLC_DRAM && ucode->ucode_size = le32_to_cpu(cp_hdr->jt_size) * 4;
ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_ERAM && ucode_addr = (u8 *)ucode->fw->data +
ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_INTV && le32_to_cpu(header->ucode_array_offset_bytes) +
ucode->ucode_id != AMDGPU_UCODE_ID_DMCUB)) { le32_to_cpu(cp_hdr->jt_offset) * 4;
ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes); break;
case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes;
le32_to_cpu(header->ucode_array_offset_bytes)), ucode_addr = adev->gfx.rlc.save_restore_list_cntl;
ucode->ucode_size); break;
} else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1 || case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2) { ucode->ucode_size = adev->gfx.rlc.save_restore_list_gpm_size_bytes;
ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) - ucode_addr = adev->gfx.rlc.save_restore_list_gpm;
le32_to_cpu(cp_hdr->jt_size) * 4; break;
case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes;
le32_to_cpu(header->ucode_array_offset_bytes)), ucode_addr = adev->gfx.rlc.save_restore_list_srm;
ucode->ucode_size); break;
} else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT || case AMDGPU_UCODE_ID_RLC_IRAM:
ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT) { ucode->ucode_size = adev->gfx.rlc.rlc_iram_ucode_size_bytes;
ucode->ucode_size = le32_to_cpu(cp_hdr->jt_size) * 4; ucode_addr = adev->gfx.rlc.rlc_iram_ucode;
break;
memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + case AMDGPU_UCODE_ID_RLC_DRAM:
le32_to_cpu(header->ucode_array_offset_bytes) + ucode->ucode_size = adev->gfx.rlc.rlc_dram_ucode_size_bytes;
le32_to_cpu(cp_hdr->jt_offset) * 4), ucode_addr = adev->gfx.rlc.rlc_dram_ucode;
ucode->ucode_size); break;
} else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCU_ERAM) { case AMDGPU_UCODE_ID_CP_MES:
ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) - ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
ucode_addr = (u8 *)ucode->fw->data +
le32_to_cpu(mes_hdr->mes_ucode_offset_bytes);
break;
case AMDGPU_UCODE_ID_CP_MES_DATA:
ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
ucode_addr = (u8 *)ucode->fw->data +
le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes);
break;
case AMDGPU_UCODE_ID_DMCU_ERAM:
ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
le32_to_cpu(dmcu_hdr->intv_size_bytes); le32_to_cpu(dmcu_hdr->intv_size_bytes);
ucode_addr = (u8 *)ucode->fw->data +
memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + le32_to_cpu(header->ucode_array_offset_bytes);
le32_to_cpu(header->ucode_array_offset_bytes)), break;
ucode->ucode_size); case AMDGPU_UCODE_ID_DMCU_INTV:
} else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCU_INTV) { ucode->ucode_size = le32_to_cpu(dmcu_hdr->intv_size_bytes);
ucode->ucode_size = le32_to_cpu(dmcu_hdr->intv_size_bytes); ucode_addr = (u8 *)ucode->fw->data +
le32_to_cpu(header->ucode_array_offset_bytes) +
memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + le32_to_cpu(dmcu_hdr->intv_offset_bytes);
le32_to_cpu(header->ucode_array_offset_bytes) + break;
le32_to_cpu(dmcu_hdr->intv_offset_bytes)), case AMDGPU_UCODE_ID_DMCUB:
ucode->ucode_size); ucode->ucode_size = le32_to_cpu(dmcub_hdr->inst_const_bytes);
} else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCUB) { ucode_addr = (u8 *)ucode->fw->data +
ucode->ucode_size = le32_to_cpu(dmcub_hdr->inst_const_bytes); le32_to_cpu(header->ucode_array_offset_bytes);
memcpy(ucode->kaddr, break;
(void *)((uint8_t *)ucode->fw->data + default:
le32_to_cpu(header->ucode_array_offset_bytes)), ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);
ucode->ucode_size); ucode_addr = (u8 *)ucode->fw->data +
} else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL) { le32_to_cpu(header->ucode_array_offset_bytes);
ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes; break;
memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_cntl, }
ucode->ucode_size); } else {
} else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM) { ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);
ucode->ucode_size = adev->gfx.rlc.save_restore_list_gpm_size_bytes; ucode_addr = (u8 *)ucode->fw->data +
memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_gpm, le32_to_cpu(header->ucode_array_offset_bytes);
ucode->ucode_size);
} else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) {
ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes;
memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_srm,
ucode->ucode_size);
} else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_IRAM) {
ucode->ucode_size = adev->gfx.rlc.rlc_iram_ucode_size_bytes;
memcpy(ucode->kaddr, adev->gfx.rlc.rlc_iram_ucode,
ucode->ucode_size);
} else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_DRAM) {
ucode->ucode_size = adev->gfx.rlc.rlc_dram_ucode_size_bytes;
memcpy(ucode->kaddr, adev->gfx.rlc.rlc_dram_ucode,
ucode->ucode_size);
} else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MES) {
ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
memcpy(ucode->kaddr, (void *)((uint8_t *)adev->mes.fw->data +
le32_to_cpu(mes_hdr->mes_ucode_offset_bytes)),
ucode->ucode_size);
} else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MES_DATA) {
ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
memcpy(ucode->kaddr, (void *)((uint8_t *)adev->mes.fw->data +
le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes)),
ucode->ucode_size);
} }
memcpy(ucode->kaddr, ucode_addr, ucode->ucode_size);
return 0; return 0;
} }


@@ -0,0 +1,51 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/ioctl.h>
+
+/*
+ * MMIO debugfs IOCTL structure
+ */
+struct amdgpu_debugfs_regs2_iocdata {
+    __u32 use_srbm, use_grbm, pg_lock;
+    struct {
+        __u32 se, sh, instance;
+    } grbm;
+    struct {
+        __u32 me, pipe, queue, vmid;
+    } srbm;
+};
+
+/*
+ * MMIO debugfs state data (per file* handle)
+ */
+struct amdgpu_debugfs_regs2_data {
+    struct amdgpu_device *adev;
+    struct mutex lock;
+    struct amdgpu_debugfs_regs2_iocdata id;
+};
+
+enum AMDGPU_DEBUGFS_REGS2_CMDS {
+    AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE = 0,
+};
+
+#define AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE _IOWR(0x20, AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE, struct amdgpu_debugfs_regs2_iocdata)
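
The intended consumer is a userspace tool (umr) that first pins the register bank via the ioctl and then reads through the same descriptor. A hedged sketch; the debugfs node name is defined in the corresponding .c file and is assumed here:

    #include <fcntl.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    uint32_t read_banked_reg(off_t reg_offset) /* byte offset of the MMIO register */
    {
        /* node name assumed; presumably created next to the older amdgpu_regs file */
        int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs2", O_RDWR);
        struct amdgpu_debugfs_regs2_iocdata id = {0};
        uint32_t val = 0;

        id.use_grbm = 1; /* select a specific SE/SH/instance bank */
        id.grbm.se = 1;
        id.grbm.sh = 0;
        id.grbm.instance = 2;

        if (fd >= 0 && ioctl(fd, AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE, &id) == 0)
            pread(fd, &val, sizeof(val), reg_offset);
        if (fd >= 0)
            close(fd);
        return val;
    }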


@@ -134,6 +134,51 @@ MODULE_FIRMWARE(FIRMWARE_VEGA12);
 MODULE_FIRMWARE(FIRMWARE_VEGA20);

 static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
+static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo);
+
+static int amdgpu_uvd_create_msg_bo_helper(struct amdgpu_device *adev,
+                                           uint32_t size,
+                                           struct amdgpu_bo **bo_ptr)
+{
+    struct ttm_operation_ctx ctx = { true, false };
+    struct amdgpu_bo *bo = NULL;
+    void *addr;
+    int r;
+
+    r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
+                                  AMDGPU_GEM_DOMAIN_GTT,
+                                  &bo, NULL, &addr);
+    if (r)
+        return r;
+
+    if (adev->uvd.address_64_bit)
+        goto succ;
+
+    amdgpu_bo_kunmap(bo);
+    amdgpu_bo_unpin(bo);
+    amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+    amdgpu_uvd_force_into_uvd_segment(bo);
+
+    r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+    if (r)
+        goto err;
+
+    r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
+    if (r)
+        goto err_pin;
+
+    r = amdgpu_bo_kmap(bo, &addr);
+    if (r)
+        goto err_kmap;
+succ:
+    amdgpu_bo_unreserve(bo);
+    *bo_ptr = bo;
+    return 0;
+err_kmap:
+    amdgpu_bo_unpin(bo);
+err_pin:
+err:
+    amdgpu_bo_unreserve(bo);
+    amdgpu_bo_unref(&bo);
+    return r;
+}

 int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 {
@@ -302,6 +347,10 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
     if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
         adev->uvd.address_64_bit = true;

+    r = amdgpu_uvd_create_msg_bo_helper(adev, 128 << 10, &adev->uvd.ib_bo);
+    if (r)
+        return r;
+
     switch (adev->asic_type) {
     case CHIP_TONGA:
         adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
@@ -324,6 +373,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)

 int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
+    void *addr = amdgpu_bo_kptr(adev->uvd.ib_bo);
     int i, j;

     drm_sched_entity_destroy(&adev->uvd.entity);
@@ -342,6 +392,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
         for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
             amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
     }
+    amdgpu_bo_free_kernel(&adev->uvd.ib_bo, NULL, &addr);
     release_firmware(adev->uvd.fw);

     return 0;
@@ -1080,23 +1131,10 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
     unsigned offset_idx = 0;
     unsigned offset[3] = { UVD_BASE_SI, 0, 0 };

-    amdgpu_bo_kunmap(bo);
-    amdgpu_bo_unpin(bo);
-
-    if (!ring->adev->uvd.address_64_bit) {
-        struct ttm_operation_ctx ctx = { true, false };
-
-        amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
-        amdgpu_uvd_force_into_uvd_segment(bo);
-        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-        if (r)
-            goto err;
-    }
-
     r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
                                  AMDGPU_IB_POOL_DELAYED, &job);
     if (r)
-        goto err;
+        return r;

     if (adev->asic_type >= CHIP_VEGA10) {
         offset_idx = 1 + ring->me;
@@ -1147,9 +1185,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
         goto err_free;
     }

+    amdgpu_bo_reserve(bo, true);
     amdgpu_bo_fence(bo, f, false);
     amdgpu_bo_unreserve(bo);
-    amdgpu_bo_unref(&bo);

     if (fence)
         *fence = dma_fence_get(f);
@@ -1159,10 +1197,6 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,

 err_free:
     amdgpu_job_free(job);
-
-err:
-    amdgpu_bo_unreserve(bo);
-    amdgpu_bo_unref(&bo);
     return r;
 }

@@ -1173,16 +1207,11 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                               struct dma_fence **fence)
 {
     struct amdgpu_device *adev = ring->adev;
-    struct amdgpu_bo *bo = NULL;
+    struct amdgpu_bo *bo = adev->uvd.ib_bo;
     uint32_t *msg;
-    int r, i;
-
-    r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
-                                  AMDGPU_GEM_DOMAIN_GTT,
-                                  &bo, NULL, (void **)&msg);
-    if (r)
-        return r;
+    int i;

+    msg = amdgpu_bo_kptr(bo);
     /* stitch together an UVD create msg */
     msg[0] = cpu_to_le32(0x00000de4);
     msg[1] = cpu_to_le32(0x00000000);
@@ -1199,6 +1228,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
         msg[i] = cpu_to_le32(0x0);

     return amdgpu_uvd_send_msg(ring, bo, true, fence);
+
 }

 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
@@ -1209,12 +1239,15 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
     uint32_t *msg;
     int r, i;

-    r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
-                                  AMDGPU_GEM_DOMAIN_GTT,
-                                  &bo, NULL, (void **)&msg);
-    if (r)
-        return r;
+    if (direct) {
+        bo = adev->uvd.ib_bo;
+    } else {
+        r = amdgpu_uvd_create_msg_bo_helper(adev, 4096, &bo);
+        if (r)
+            return r;
+    }

+    msg = amdgpu_bo_kptr(bo);
     /* stitch together an UVD destroy msg */
     msg[0] = cpu_to_le32(0x00000de4);
     msg[1] = cpu_to_le32(0x00000002);
@@ -1223,7 +1256,12 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
     for (i = 4; i < 1024; ++i)
         msg[i] = cpu_to_le32(0x0);

-    return amdgpu_uvd_send_msg(ring, bo, direct, fence);
+    r = amdgpu_uvd_send_msg(ring, bo, direct, fence);
+    if (!direct)
+        amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
+
+    return r;
 }

 static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
@@ -1298,10 +1336,17 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
     struct dma_fence *fence;
     long r;

-    r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
+    r = amdgpu_uvd_get_create_msg(ring, 1, &fence);
     if (r)
         goto error;

+    r = dma_fence_wait_timeout(fence, false, timeout);
+    dma_fence_put(fence);
+    if (r == 0)
+        r = -ETIMEDOUT;
+    if (r < 0)
+        goto error;
+
     r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
     if (r)
         goto error;
View File

@ -68,6 +68,7 @@ struct amdgpu_uvd {
/* store image width to adjust nb memory state */ /* store image width to adjust nb memory state */
unsigned decode_image_width; unsigned decode_image_width;
uint32_t keyselect; uint32_t keyselect;
struct amdgpu_bo *ib_bo;
}; };
int amdgpu_uvd_sw_init(struct amdgpu_device *adev); int amdgpu_uvd_sw_init(struct amdgpu_device *adev);


@ -82,7 +82,6 @@ MODULE_FIRMWARE(FIRMWARE_VEGA20);
static void amdgpu_vce_idle_work_handler(struct work_struct *work); static void amdgpu_vce_idle_work_handler(struct work_struct *work);
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
struct amdgpu_bo *bo,
struct dma_fence **fence); struct dma_fence **fence);
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct dma_fence **fence); bool direct, struct dma_fence **fence);
@@ -441,12 +440,12 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
  * Open up a stream for HW test
  */
 static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-                                     struct amdgpu_bo *bo,
                                      struct dma_fence **fence)
 {
     const unsigned ib_size_dw = 1024;
     struct amdgpu_job *job;
     struct amdgpu_ib *ib;
+    struct amdgpu_ib ib_msg;
     struct dma_fence *f = NULL;
     uint64_t addr;
     int i, r;
@@ -456,9 +455,17 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
     if (r)
         return r;

-    ib = &job->ibs[0];
+    memset(&ib_msg, 0, sizeof(ib_msg));
+    /* only one gpu page is needed, alloc +1 page to make addr aligned. */
+    r = amdgpu_ib_get(ring->adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
+                      AMDGPU_IB_POOL_DIRECT,
+                      &ib_msg);
+    if (r)
+        goto err;

-    addr = amdgpu_bo_gpu_offset(bo);
+    ib = &job->ibs[0];
+    /* let addr point to page boundary */
+    addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg.gpu_addr);

     /* stitch together an VCE create msg */
     ib->length_dw = 0;
@@ -498,6 +505,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
         ib->ptr[i] = 0x0;

     r = amdgpu_job_submit_direct(job, ring, &f);
+    amdgpu_ib_free(ring->adev, &ib_msg, f);
     if (r)
         goto err;
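The temporary message BO is gone; the message now lives in an IB from the direct pool, and the extra page in the AMDGPU_GPU_PAGE_SIZE * 2 allocation is what makes the in-place alignment legal. A self-contained sketch of that invariant (local names, assuming the usual 4 KiB GPU page):

#include <stdint.h>

#define GPU_PAGE_SIZE 4096ULL
#define GPU_PAGE_ALIGN(a) (((a) + GPU_PAGE_SIZE - 1) & ~(GPU_PAGE_SIZE - 1))

/* Rounding up moves the address by less than one page, so one fully
 * aligned page always fits inside a two-page suballocation:
 * GPU_PAGE_ALIGN(a) + GPU_PAGE_SIZE <= a + 2 * GPU_PAGE_SIZE.
 */
static uint64_t msg_addr_in_two_page_ib(uint64_t ib_gpu_addr)
{
    return GPU_PAGE_ALIGN(ib_gpu_addr);
}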
@@ -1134,20 +1142,13 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
 int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
     struct dma_fence *fence = NULL;
-    struct amdgpu_bo *bo = NULL;
     long r;

     /* skip vce ring1/2 ib test for now, since it's not reliable */
     if (ring != &ring->adev->vce.ring[0])
         return 0;

-    r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
-                                  AMDGPU_GEM_DOMAIN_VRAM,
-                                  &bo, NULL, NULL);
-    if (r)
-        return r;
-
-    r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL);
+    r = amdgpu_vce_get_create_msg(ring, 1, NULL);
     if (r)
         goto error;
@@ -1163,7 +1164,19 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 error:
     dma_fence_put(fence);
-    amdgpu_bo_unreserve(bo);
-    amdgpu_bo_free_kernel(&bo, NULL, NULL);
     return r;
 }
+
+enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring)
+{
+    switch(ring) {
+    case 0:
+        return AMDGPU_RING_PRIO_0;
+    case 1:
+        return AMDGPU_RING_PRIO_1;
+    case 2:
+        return AMDGPU_RING_PRIO_2;
+    default:
+        return AMDGPU_RING_PRIO_0;
+    }
+}
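The new helper feeds the ring priority into scheduler setup; the intended call-site shape, mirroring the vce_v*_0_sw_init() hunks further down in this merge:

for (i = 0; i < adev->vce.num_rings; i++) {
    enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);

    ring = &adev->vce.ring[i];
    sprintf(ring->name, "vce%d", i);
    r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
                         hw_prio, NULL);
    if (r)
        return r;
}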


@@ -71,5 +71,6 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring);
 void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring);
 unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring);
 unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring);
+enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring);

 #endif


@@ -541,15 +541,14 @@ int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
 }

 static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
-                                   struct amdgpu_bo *bo,
+                                   struct amdgpu_ib *ib_msg,
                                    struct dma_fence **fence)
 {
     struct amdgpu_device *adev = ring->adev;
     struct dma_fence *f = NULL;
     struct amdgpu_job *job;
     struct amdgpu_ib *ib;
-    uint64_t addr;
-    void *msg = NULL;
+    uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
     int i, r;

     r = amdgpu_job_alloc_with_ib(adev, 64,
@@ -558,8 +557,6 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
         goto err;

     ib = &job->ibs[0];
-    addr = amdgpu_bo_gpu_offset(bo);
-    msg = amdgpu_bo_kptr(bo);
     ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
     ib->ptr[1] = addr;
     ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
@@ -576,9 +573,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
     if (r)
         goto err_free;

-    amdgpu_bo_fence(bo, f, false);
-    amdgpu_bo_unreserve(bo);
-    amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
+    amdgpu_ib_free(adev, ib_msg, f);

     if (fence)
         *fence = dma_fence_get(f);
@@ -588,27 +583,26 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
 err_free:
     amdgpu_job_free(job);
 err:
-    amdgpu_bo_unreserve(bo);
-    amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
+    amdgpu_ib_free(adev, ib_msg, f);
     return r;
 }
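Freeing the message IB right after submission is safe because amdgpu_ib_free() takes the job fence: the suballocation is only recycled once that fence signals, and on the error path (where f is still NULL) it is returned immediately. The pattern in isolation:

r = amdgpu_job_submit_direct(job, ring, &f);
/* f keeps the message memory alive until the engine has read it;
 * with f == NULL (submission failed) the IB is freed right away.
 */
amdgpu_ib_free(adev, ib_msg, f);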
 static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-                                         struct amdgpu_bo **bo)
+                                         struct amdgpu_ib *ib)
 {
     struct amdgpu_device *adev = ring->adev;
     uint32_t *msg;
     int r, i;

-    *bo = NULL;
-    r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
-                                  AMDGPU_GEM_DOMAIN_VRAM,
-                                  bo, NULL, (void **)&msg);
+    memset(ib, 0, sizeof(*ib));
+    r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
+                      AMDGPU_IB_POOL_DIRECT,
+                      ib);
     if (r)
         return r;

+    msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
     msg[0] = cpu_to_le32(0x00000028);
     msg[1] = cpu_to_le32(0x00000038);
     msg[2] = cpu_to_le32(0x00000001);
@@ -630,19 +624,20 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 }

 static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-                                          struct amdgpu_bo **bo)
+                                          struct amdgpu_ib *ib)
 {
     struct amdgpu_device *adev = ring->adev;
     uint32_t *msg;
     int r, i;

-    *bo = NULL;
-    r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
-                                  AMDGPU_GEM_DOMAIN_VRAM,
-                                  bo, NULL, (void **)&msg);
+    memset(ib, 0, sizeof(*ib));
+    r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
+                      AMDGPU_IB_POOL_DIRECT,
+                      ib);
     if (r)
         return r;

+    msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
     msg[0] = cpu_to_le32(0x00000028);
     msg[1] = cpu_to_le32(0x00000018);
     msg[2] = cpu_to_le32(0x00000000);
@@ -658,21 +653,21 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
     struct dma_fence *fence = NULL;
-    struct amdgpu_bo *bo;
+    struct amdgpu_ib ib;
     long r;

-    r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
+    r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
     if (r)
         goto error;

-    r = amdgpu_vcn_dec_send_msg(ring, bo, NULL);
+    r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
     if (r)
         goto error;

-    r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &bo);
+    r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
     if (r)
         goto error;

-    r = amdgpu_vcn_dec_send_msg(ring, bo, &fence);
+    r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
     if (r)
         goto error;
@@ -688,8 +683,8 @@ error:
 }

 static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
-                                      struct amdgpu_bo *bo,
+                                      struct amdgpu_ib *ib_msg,
                                       struct dma_fence **fence)
 {
     struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
     const unsigned int ib_size_dw = 64;
@@ -697,7 +692,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
     struct dma_fence *f = NULL;
     struct amdgpu_job *job;
     struct amdgpu_ib *ib;
-    uint64_t addr;
+    uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
     int i, r;

     r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4,
@@ -706,7 +701,6 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
         goto err;

     ib = &job->ibs[0];
-    addr = amdgpu_bo_gpu_offset(bo);
     ib->length_dw = 0;

     ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
@@ -726,9 +720,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
     if (r)
         goto err_free;

-    amdgpu_bo_fence(bo, f, false);
-    amdgpu_bo_unreserve(bo);
-    amdgpu_bo_unref(&bo);
+    amdgpu_ib_free(adev, ib_msg, f);

     if (fence)
         *fence = dma_fence_get(f);
@@ -738,31 +730,29 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 err_free:
     amdgpu_job_free(job);
 err:
-    amdgpu_bo_unreserve(bo);
-    amdgpu_bo_unref(&bo);
+    amdgpu_ib_free(adev, ib_msg, f);
     return r;
 }

 int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
     struct dma_fence *fence = NULL;
-    struct amdgpu_bo *bo;
+    struct amdgpu_ib ib;
     long r;

-    r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
+    r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
     if (r)
         goto error;

-    r = amdgpu_vcn_dec_sw_send_msg(ring, bo, NULL);
+    r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
     if (r)
         goto error;

-    r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &bo);
+    r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
     if (r)
         goto error;

-    r = amdgpu_vcn_dec_sw_send_msg(ring, bo, &fence);
+    r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
     if (r)
         goto error;
@@ -809,7 +799,7 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
 }

 static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-                                         struct amdgpu_bo *bo,
+                                         struct amdgpu_ib *ib_msg,
                                          struct dma_fence **fence)
 {
     const unsigned ib_size_dw = 16;
@@ -825,7 +815,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
         return r;

     ib = &job->ibs[0];
-    addr = amdgpu_bo_gpu_offset(bo);
+    addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);

     ib->length_dw = 0;
     ib->ptr[ib->length_dw++] = 0x00000018;
@@ -863,7 +853,7 @@ err:
 }

 static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-                                          struct amdgpu_bo *bo,
+                                          struct amdgpu_ib *ib_msg,
                                           struct dma_fence **fence)
 {
     const unsigned ib_size_dw = 16;
@@ -879,7 +869,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
         return r;

     ib = &job->ibs[0];
-    addr = amdgpu_bo_gpu_offset(bo);
+    addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);

     ib->length_dw = 0;
     ib->ptr[ib->length_dw++] = 0x00000018;
@@ -918,21 +908,23 @@ err:
 int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
+    struct amdgpu_device *adev = ring->adev;
     struct dma_fence *fence = NULL;
-    struct amdgpu_bo *bo = NULL;
+    struct amdgpu_ib ib;
     long r;

-    r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
-                                  AMDGPU_GEM_DOMAIN_VRAM,
-                                  &bo, NULL, NULL);
+    memset(&ib, 0, sizeof(ib));
+    r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE,
+                      AMDGPU_IB_POOL_DIRECT,
+                      &ib);
     if (r)
         return r;

-    r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
+    r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
     if (r)
         goto error;

-    r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
+    r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
     if (r)
         goto error;
@@ -943,9 +935,22 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
     r = 0;

 error:
+    amdgpu_ib_free(adev, &ib, fence);
     dma_fence_put(fence);
-    amdgpu_bo_unreserve(bo);
-    amdgpu_bo_free_kernel(&bo, NULL, NULL);
     return r;
 }
+
+enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
+{
+    switch(ring) {
+    case 0:
+        return AMDGPU_RING_PRIO_0;
+    case 1:
+        return AMDGPU_RING_PRIO_1;
+    case 2:
+        return AMDGPU_RING_PRIO_2;
+    default:
+        return AMDGPU_RING_PRIO_0;
+    }
+}


@@ -308,4 +308,6 @@ int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout);
 int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring);
 int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);

+enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring);
+
 #endif


@@ -532,9 +532,12 @@ static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
     POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version);
     POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version);
     POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos.fw_version);
-    POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD, adev->psp.asd.fw_version);
-    POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS, adev->psp.ras.feature_version);
-    POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI, adev->psp.xgmi.feature_version);
+    POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,
+                        adev->psp.asd_context.bin_desc.fw_version);
+    POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,
+                        adev->psp.ras_context.context.bin_desc.feature_version);
+    POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,
+                        adev->psp.xgmi_context.context.bin_desc.feature_version);
     POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC, adev->pm.fw_version);
     POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA, adev->sdma.instance[0].fw_version);
     POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2, adev->sdma.instance[1].fw_version);


@@ -800,7 +800,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
     struct amdgpu_bo *bo = &vmbo->bo;
     unsigned entries, ats_entries;
     uint64_t addr;
-    int r;
+    int r, idx;

     /* Figure out our place in the hierarchy */
     if (ancestor->parent) {
@@ -845,9 +845,12 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
         return r;
     }

+    if (!drm_dev_enter(&adev->ddev, &idx))
+        return -ENODEV;
+
     r = vm->update_funcs->map_table(vmbo);
     if (r)
-        return r;
+        goto exit;

     memset(&params, 0, sizeof(params));
     params.adev = adev;
@@ -856,7 +859,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,

     r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
     if (r)
-        return r;
+        goto exit;

     addr = 0;
     if (ats_entries) {
@@ -872,7 +875,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
         r = vm->update_funcs->update(&params, vmbo, addr, 0, ats_entries,
                                      value, flags);
         if (r)
-            return r;
+            goto exit;

         addr += ats_entries * 8;
     }
@@ -895,10 +898,13 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
         r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
                                      value, flags);
         if (r)
-            return r;
+            goto exit;
     }

-    return vm->update_funcs->commit(&params, NULL);
+    r = vm->update_funcs->commit(&params, NULL);
+exit:
+    drm_dev_exit(idx);
+    return r;
 }

 /**
@@ -1384,11 +1390,14 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
                           struct amdgpu_vm *vm, bool immediate)
 {
     struct amdgpu_vm_update_params params;
-    int r;
+    int r, idx;

     if (list_empty(&vm->relocated))
         return 0;

+    if (!drm_dev_enter(&adev->ddev, &idx))
+        return -ENODEV;
+
     memset(&params, 0, sizeof(params));
     params.adev = adev;
     params.vm = vm;
@@ -1396,7 +1405,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,

     r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
     if (r)
-        return r;
+        goto exit;

     while (!list_empty(&vm->relocated)) {
         struct amdgpu_vm_bo_base *entry;
@@ -1414,10 +1423,13 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
     r = vm->update_funcs->commit(&params, &vm->last_update);
     if (r)
         goto error;
+    drm_dev_exit(idx);
     return 0;

 error:
     amdgpu_vm_invalidate_pds(adev, vm);
+exit:
+    drm_dev_exit(idx);
     return r;
 }
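drm_dev_enter()/drm_dev_exit() is the hotplug guard used throughout this merge: it opens an SRCU read section that fails once the device has been unplugged, so the page-table update path can bail out with -ENODEV instead of touching vanished hardware. The shape of the guard, as a self-contained sketch:

#include <drm/drm_drv.h>

static int guarded_hw_update(struct drm_device *ddev)
{
    int idx;

    if (!drm_dev_enter(ddev, &idx))
        return -ENODEV;    /* device already unplugged */

    /* ... register and page-table accesses go here ... */

    drm_dev_exit(idx);
    return 0;
}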


@@ -204,8 +204,10 @@ struct amd_sriov_msg_pf2vf_info {
     } mm_bw_management[AMD_SRIOV_MSG_RESERVE_VCN_INST];
     /* UUID info */
     struct amd_sriov_msg_uuid_info uuid_info;
+    /* pcie atomic Ops info */
+    uint32_t pcie_atomic_ops_enabled_flags;
     /* reserved */
-    uint32_t reserved[256 - 47];
+    uint32_t reserved[256 - 48];
 };

 struct amd_sriov_msg_vf2pf_info_header {
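The pf2vf body after the header is padded to a fixed 256 dwords, so every added field must shrink the reserved tail by its own dword count; that is why one new uint32_t turns reserved[256 - 47] into reserved[256 - 48]. The accounting, as a compile-time check (illustrative, not part of the header):

#include <stdint.h>

/* one new dword field, one fewer reserved dword: total size unchanged */
_Static_assert((256 - 47) * sizeof(uint32_t) ==
               (256 - 48) * sizeof(uint32_t) + sizeof(uint32_t),
               "pf2vf dword budget must stay balanced");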


@@ -52,7 +52,8 @@ const struct amdgpu_mca_ras_funcs mca_v3_0_mp0_ras_funcs = {
     .ras_fini = mca_v3_0_mp0_ras_fini,
     .query_ras_error_count = mca_v3_0_mp0_query_ras_error_count,
     .query_ras_error_address = NULL,
-    .ras_block = AMDGPU_RAS_BLOCK__MP0,
+    .ras_block = AMDGPU_RAS_BLOCK__MCA,
+    .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MP0,
     .sysfs_name = "mp0_err_count",
 };

@@ -79,7 +80,8 @@ const struct amdgpu_mca_ras_funcs mca_v3_0_mp1_ras_funcs = {
     .ras_fini = mca_v3_0_mp1_ras_fini,
     .query_ras_error_count = mca_v3_0_mp1_query_ras_error_count,
     .query_ras_error_address = NULL,
-    .ras_block = AMDGPU_RAS_BLOCK__MP1,
+    .ras_block = AMDGPU_RAS_BLOCK__MCA,
+    .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MP1,
     .sysfs_name = "mp1_err_count",
 };

@@ -106,7 +108,8 @@ const struct amdgpu_mca_ras_funcs mca_v3_0_mpio_ras_funcs = {
     .ras_fini = mca_v3_0_mpio_ras_fini,
     .query_ras_error_count = mca_v3_0_mpio_query_ras_error_count,
     .query_ras_error_address = NULL,
-    .ras_block = AMDGPU_RAS_BLOCK__MPIO,
+    .ras_block = AMDGPU_RAS_BLOCK__MCA,
+    .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MPIO,
     .sysfs_name = "mpio_err_count",
 };
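All three instances now share AMDGPU_RAS_BLOCK__MCA and are told apart only by ras_sub_block, which lends itself to table-driven dispatch; a hypothetical lookup in that spirit (sketch only, not part of the patch):

static const struct amdgpu_mca_ras_funcs *mca_v3_0_funcs_by_sub_block[] = {
    [AMDGPU_RAS_MCA_BLOCK__MP0]  = &mca_v3_0_mp0_ras_funcs,
    [AMDGPU_RAS_MCA_BLOCK__MP1]  = &mca_v3_0_mp1_ras_funcs,
    [AMDGPU_RAS_MCA_BLOCK__MPIO] = &mca_v3_0_mpio_ras_funcs,
};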


@@ -387,13 +387,13 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
                 "errors detected in %s block, "
                 "no user action is needed.\n",
                 obj->err_data.ce_count,
-                ras_block_str(adev->nbio.ras_if->block));
+                get_ras_block_str(adev->nbio.ras_if));

         if (err_data.ue_count)
             dev_info(adev->dev, "%ld uncorrectable hardware "
                 "errors detected in %s block\n",
                 obj->err_data.ue_count,
-                ras_block_str(adev->nbio.ras_if->block));
+                get_ras_block_str(adev->nbio.ras_if));
     }

     dev_info(adev->dev, "RAS controller interrupt triggered "
@@ -566,7 +566,9 @@ static int nbio_v7_4_init_ras_err_event_athub_interrupt (struct amdgpu_device *a
     return r;
 }

 #define smnPARITY_ERROR_STATUS_UNCORR_GRP2    0x13a20030
+#define smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE    0x13b20030
+#define smnRAS_GLOBAL_STATUS_LO_ALDE    0x13b20020

 static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
                                             void *ras_error_status)
@@ -575,12 +577,20 @@ static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
     uint32_t corr, fatal, non_fatal;
     struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

-    global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO);
+    if (adev->asic_type == CHIP_ALDEBARAN)
+        global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO_ALDE);
+    else
+        global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO);
+
     corr = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrCorr);
     fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrFatal);
     non_fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO,
                               ParityErrNonFatal);
-    parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2);
+
+    if (adev->asic_type == CHIP_ALDEBARAN)
+        parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE);
+    else
+        parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2);

     if (corr)
         err_data->ce_count++;
@@ -589,13 +599,21 @@ static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
     if (corr || fatal || non_fatal) {
         central_sts = RREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS);
         /* clear error status register */
-        WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts);
+        if (adev->asic_type == CHIP_ALDEBARAN)
+            WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO_ALDE, global_sts);
+        else
+            WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts);

         if (fatal)
+        {
             /* clear parity fatal error indication field */
-            WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2,
-                        parity_sts);
+            if (adev->asic_type == CHIP_ALDEBARAN)
+                WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE, parity_sts);
+            else
+                WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2, parity_sts);
+        }

         if (REG_GET_FIELD(central_sts, BIFL_RAS_CENTRAL_STATUS,
                           BIFL_RasContller_Intr_Recv)) {
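The Aldebaran-or-default register choice now appears four times in this function; a small helper in the same spirit (hypothetical, not in the patch) would collapse the repetition:

static uint32_t nbio_v7_4_ras_global_status_reg(struct amdgpu_device *adev)
{
    return adev->asic_type == CHIP_ALDEBARAN ?
           smnRAS_GLOBAL_STATUS_LO_ALDE : smnRAS_GLOBAL_STATUS_LO;
}

/* usage: global_sts = RREG32_PCIE(nbio_v7_4_ras_global_status_reg(adev)); */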


@@ -84,28 +84,28 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
         ta_hdr = (const struct ta_firmware_header_v1_0 *)
              adev->psp.ta_fw->data;
-        adev->psp.hdcp.feature_version =
+        adev->psp.hdcp_context.context.bin_desc.feature_version =
             le32_to_cpu(ta_hdr->hdcp.fw_version);
-        adev->psp.hdcp.size_bytes =
+        adev->psp.hdcp_context.context.bin_desc.size_bytes =
             le32_to_cpu(ta_hdr->hdcp.size_bytes);
-        adev->psp.hdcp.start_addr =
+        adev->psp.hdcp_context.context.bin_desc.start_addr =
             (uint8_t *)ta_hdr +
             le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);

-        adev->psp.dtm.feature_version =
+        adev->psp.dtm_context.context.bin_desc.feature_version =
             le32_to_cpu(ta_hdr->dtm.fw_version);
-        adev->psp.dtm.size_bytes =
+        adev->psp.dtm_context.context.bin_desc.size_bytes =
             le32_to_cpu(ta_hdr->dtm.size_bytes);
-        adev->psp.dtm.start_addr =
-            (uint8_t *)adev->psp.hdcp.start_addr +
+        adev->psp.dtm_context.context.bin_desc.start_addr =
+            (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
             le32_to_cpu(ta_hdr->dtm.offset_bytes);

-        adev->psp.securedisplay.feature_version =
+        adev->psp.securedisplay_context.context.bin_desc.feature_version =
             le32_to_cpu(ta_hdr->securedisplay.fw_version);
-        adev->psp.securedisplay.size_bytes =
+        adev->psp.securedisplay_context.context.bin_desc.size_bytes =
             le32_to_cpu(ta_hdr->securedisplay.size_bytes);
-        adev->psp.securedisplay.start_addr =
-            (uint8_t *)adev->psp.hdcp.start_addr +
+        adev->psp.securedisplay_context.context.bin_desc.start_addr =
+            (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
             le32_to_cpu(ta_hdr->securedisplay.offset_bytes);

         adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);


@@ -151,14 +151,20 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
             goto out2;

         ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
-        adev->psp.xgmi.feature_version = le32_to_cpu(ta_hdr->xgmi.fw_version);
-        adev->psp.xgmi.size_bytes = le32_to_cpu(ta_hdr->xgmi.size_bytes);
-        adev->psp.xgmi.start_addr = (uint8_t *)ta_hdr +
+        adev->psp.xgmi_context.context.bin_desc.feature_version =
+            le32_to_cpu(ta_hdr->xgmi.fw_version);
+        adev->psp.xgmi_context.context.bin_desc.size_bytes =
+            le32_to_cpu(ta_hdr->xgmi.size_bytes);
+        adev->psp.xgmi_context.context.bin_desc.start_addr =
+            (uint8_t *)ta_hdr +
             le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
         adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
-        adev->psp.ras.feature_version = le32_to_cpu(ta_hdr->ras.fw_version);
-        adev->psp.ras.size_bytes = le32_to_cpu(ta_hdr->ras.size_bytes);
-        adev->psp.ras.start_addr = (uint8_t *)adev->psp.xgmi.start_addr +
+        adev->psp.ras_context.context.bin_desc.feature_version =
+            le32_to_cpu(ta_hdr->ras.fw_version);
+        adev->psp.ras_context.context.bin_desc.size_bytes =
+            le32_to_cpu(ta_hdr->ras.size_bytes);
+        adev->psp.ras_context.context.bin_desc.start_addr =
+            (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
             le32_to_cpu(ta_hdr->ras.offset_bytes);
     }
     break;
@@ -186,16 +192,24 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
             goto out2;

         ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
-        adev->psp.hdcp.feature_version = le32_to_cpu(ta_hdr->hdcp.fw_version);
-        adev->psp.hdcp.size_bytes = le32_to_cpu(ta_hdr->hdcp.size_bytes);
-        adev->psp.hdcp.start_addr = (uint8_t *)ta_hdr +
-            le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+        adev->psp.hdcp_context.context.bin_desc.feature_version =
+            le32_to_cpu(ta_hdr->hdcp.fw_version);
+        adev->psp.hdcp_context.context.bin_desc.size_bytes =
+            le32_to_cpu(ta_hdr->hdcp.size_bytes);
+        adev->psp.hdcp_context.context.bin_desc.start_addr =
+            (uint8_t *)ta_hdr +
+            le32_to_cpu(
+                ta_hdr->header.ucode_array_offset_bytes);

         adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);

-        adev->psp.dtm.feature_version = le32_to_cpu(ta_hdr->dtm.fw_version);
-        adev->psp.dtm.size_bytes = le32_to_cpu(ta_hdr->dtm.size_bytes);
-        adev->psp.dtm.start_addr = (uint8_t *)adev->psp.hdcp.start_addr +
+        adev->psp.dtm_context.context.bin_desc.feature_version =
+            le32_to_cpu(ta_hdr->dtm.fw_version);
+        adev->psp.dtm_context.context.bin_desc.size_bytes =
+            le32_to_cpu(ta_hdr->dtm.size_bytes);
+        adev->psp.dtm_context.context.bin_desc.start_addr =
+            (uint8_t *)adev->psp.hdcp_context.context
+                    .bin_desc.start_addr +
             le32_to_cpu(ta_hdr->dtm.offset_bytes);
     }
     break;


@@ -84,22 +84,22 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
         ta_hdr = (const struct ta_firmware_header_v1_0 *)
              adev->psp.ta_fw->data;
-        adev->psp.hdcp.feature_version =
+        adev->psp.hdcp_context.context.bin_desc.feature_version =
             le32_to_cpu(ta_hdr->hdcp.fw_version);
-        adev->psp.hdcp.size_bytes =
+        adev->psp.hdcp_context.context.bin_desc.size_bytes =
             le32_to_cpu(ta_hdr->hdcp.size_bytes);
-        adev->psp.hdcp.start_addr =
+        adev->psp.hdcp_context.context.bin_desc.start_addr =
             (uint8_t *)ta_hdr +
             le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);

         adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);

-        adev->psp.dtm.feature_version =
+        adev->psp.dtm_context.context.bin_desc.feature_version =
             le32_to_cpu(ta_hdr->dtm.fw_version);
-        adev->psp.dtm.size_bytes =
+        adev->psp.dtm_context.context.bin_desc.size_bytes =
             le32_to_cpu(ta_hdr->dtm.size_bytes);
-        adev->psp.dtm.start_addr =
-            (uint8_t *)adev->psp.hdcp.start_addr +
+        adev->psp.dtm_context.context.bin_desc.start_addr =
+            (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
             le32_to_cpu(ta_hdr->dtm.offset_bytes);
     }
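Across psp_v10/v11/v12 the rename follows one rule: each flat per-TA field moves to <ta>_context.context.bin_desc. Taking a local pointer keeps the long chains readable; a sketch of the equivalent assignments, assuming struct psp_bin_desc carries the feature_version/size_bytes/start_addr triple as seen in these hunks:

struct psp_bin_desc *dtm = &adev->psp.dtm_context.context.bin_desc;

dtm->feature_version = le32_to_cpu(ta_hdr->dtm.fw_version);
dtm->size_bytes      = le32_to_cpu(ta_hdr->dtm.size_bytes);
dtm->start_addr      = (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
                       le32_to_cpu(ta_hdr->dtm.offset_bytes);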


@@ -375,10 +375,10 @@ static void sdma_v5_2_ring_emit_ib(struct amdgpu_ring *ring,
  */
 static void sdma_v5_2_ring_emit_mem_sync(struct amdgpu_ring *ring)
 {
-    uint32_t gcr_cntl =
-        SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV |
-        SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
-        SDMA_GCR_GLI_INV(1);
+    uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB |
+                        SDMA_GCR_GLM_INV | SDMA_GCR_GL1_INV |
+                        SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
+                        SDMA_GCR_GLI_INV(1);

     /* flush entire cache L0/L1/L2, this can be optimized by performance requirement */
     amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));


@@ -85,6 +85,8 @@
 #define mmMP0_MISC_LIGHT_SLEEP_CTRL    0x01ba
 #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX    0

+static const struct amd_ip_funcs soc15_common_ip_funcs;
+
 /* Vega, Raven, Arcturus */
 static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] =
 {
@@ -1645,7 +1647,7 @@ static int soc15_common_set_powergating_state(void *handle,
     return 0;
 }

-const struct amd_ip_funcs soc15_common_ip_funcs = {
+static const struct amd_ip_funcs soc15_common_ip_funcs = {
     .name = "soc15_common",
     .early_init = soc15_common_early_init,
     .late_init = soc15_common_late_init,
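Dropping the extern from soc15.h works because C allows a file-scope tentative declaration of a static const object before its initialized definition: earlier code in the file can already reference the table while the initializer stays at the bottom. The pattern in miniature (names invented):

struct ip_ops {
    const char *name;
};

/* tentative declaration: later code may take its address already */
static const struct ip_ops common_ops;

static const struct ip_ops *get_common_ops(void)
{
    return &common_ops;
}

/* the real definition, with the initializer, comes last */
static const struct ip_ops common_ops = {
    .name = "soc15_common",
};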


@@ -31,8 +31,6 @@
 #define SOC15_FLUSH_GPU_TLB_NUM_WREG        6
 #define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT    3

-extern const struct amd_ip_funcs soc15_common_ip_funcs;
-
 struct soc15_reg_golden {
     u32    hwip;
     u32    instance;


@@ -38,9 +38,8 @@ enum ras_command {
     TA_RAS_COMMAND__TRIGGER_ERROR,
 };

-enum ta_ras_status
-{
-    TA_RAS_STATUS__SUCCESS                  = 0x00,
+enum ta_ras_status {
+    TA_RAS_STATUS__SUCCESS                  = 0x0000,
     TA_RAS_STATUS__RESET_NEEDED             = 0xA001,
     TA_RAS_STATUS__ERROR_INVALID_PARAMETER  = 0xA002,
     TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE  = 0xA003,
@@ -55,7 +54,12 @@ enum ta_ras_status
     TA_RAS_STATUS__ERROR_GET_DEV_INFO       = 0xA00C,
     TA_RAS_STATUS__ERROR_UNSUPPORTED_DEV    = 0xA00D,
     TA_RAS_STATUS__ERROR_NOT_INITIALIZED    = 0xA00E,
-    TA_RAS_STATUS__ERROR_TEE_INTERNAL       = 0xA00F
+    TA_RAS_STATUS__ERROR_TEE_INTERNAL       = 0xA00F,
+    TA_RAS_STATUS__ERROR_UNSUPPORTED_FUNCTION = 0xA010,
+    TA_RAS_STATUS__ERROR_SYS_DRV_REG_ACCESS = 0xA011,
+    TA_RAS_STATUS__ERROR_RAS_READ_WRITE     = 0xA012,
+    TA_RAS_STATUS__ERROR_NULL_PTR           = 0xA013,
+    TA_RAS_STATUS__ERROR_UNSUPPORTED_IP     = 0xA014
 };

 enum ta_ras_block {
@@ -73,9 +77,18 @@ enum ta_ras_block {
     TA_RAS_BLOCK__MP0,
     TA_RAS_BLOCK__MP1,
     TA_RAS_BLOCK__FUSE,
+    TA_RAS_BLOCK__MCA,
     TA_NUM_BLOCK_MAX
 };

+enum ta_ras_mca_block {
+    TA_RAS_MCA_BLOCK__MP0  = 0,
+    TA_RAS_MCA_BLOCK__MP1  = 1,
+    TA_RAS_MCA_BLOCK__MPIO = 2,
+    TA_RAS_MCA_BLOCK__IOHC = 3,
+    TA_MCA_NUM_BLOCK_MAX
+};
+
 enum ta_ras_error_type {
     TA_RAS_ERROR__NONE    = 0,
     TA_RAS_ERROR__PARITY  = 1,
@@ -105,17 +118,15 @@ struct ta_ras_trigger_error_input {
     uint64_t    value;    // method if error injection. i.e persistent, coherent etc.
 };

-struct ta_ras_init_flags
-{
-    uint8_t poison_mode_en;
-    uint8_t dgpu_mode;
+struct ta_ras_init_flags {
+    uint8_t poison_mode_en;
+    uint8_t dgpu_mode;
 };

-struct ta_ras_output_flags
-{
-    uint8_t ras_init_success_flag;
-    uint8_t err_inject_switch_disable_flag;
-    uint8_t reg_access_failure_flag;
+struct ta_ras_output_flags {
+    uint8_t ras_init_success_flag;
+    uint8_t err_inject_switch_disable_flag;
+    uint8_t reg_access_failure_flag;
 };

 /* Common input structure for RAS callbacks */
@@ -126,14 +137,13 @@ union ta_ras_cmd_input {
     struct ta_ras_disable_features_input    disable_features;
     struct ta_ras_trigger_error_input       trigger_error;

     uint32_t reserve_pad[256];
 };

-union ta_ras_cmd_output
-{
-    struct ta_ras_output_flags flags;
+union ta_ras_cmd_output {
+    struct ta_ras_output_flags flags;

     uint32_t reserve_pad[256];
 };

 /* Shared Memory structures */


@@ -698,6 +698,19 @@ static int uvd_v3_1_hw_fini(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;

+    cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+    if (RREG32(mmUVD_STATUS) != 0)
+        uvd_v3_1_stop(adev);
+
+    return 0;
+}
+
+static int uvd_v3_1_suspend(void *handle)
+{
+    int r;
+    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
     /*
      * Proper cleanups before halting the HW engine:
      *   - cancel the delayed idle work
@@ -722,17 +735,6 @@ static int uvd_v3_1_hw_fini(void *handle)
                                                AMD_CG_STATE_GATE);
     }

-    if (RREG32(mmUVD_STATUS) != 0)
-        uvd_v3_1_stop(adev);
-
-    return 0;
-}
-
-static int uvd_v3_1_suspend(void *handle)
-{
-    int r;
-    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
     r = uvd_v3_1_hw_fini(adev);
     if (r)
         return r;


@@ -212,6 +212,19 @@ static int uvd_v4_2_hw_fini(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;

+    cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+    if (RREG32(mmUVD_STATUS) != 0)
+        uvd_v4_2_stop(adev);
+
+    return 0;
+}
+
+static int uvd_v4_2_suspend(void *handle)
+{
+    int r;
+    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
     /*
      * Proper cleanups before halting the HW engine:
      *   - cancel the delayed idle work
@@ -236,17 +249,6 @@ static int uvd_v4_2_hw_fini(void *handle)
                                                AMD_CG_STATE_GATE);
     }

-    if (RREG32(mmUVD_STATUS) != 0)
-        uvd_v4_2_stop(adev);
-
-    return 0;
-}
-
-static int uvd_v4_2_suspend(void *handle)
-{
-    int r;
-    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
     r = uvd_v4_2_hw_fini(adev);
     if (r)
         return r;


@@ -210,6 +210,19 @@ static int uvd_v5_0_hw_fini(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;

+    cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+    if (RREG32(mmUVD_STATUS) != 0)
+        uvd_v5_0_stop(adev);
+
+    return 0;
+}
+
+static int uvd_v5_0_suspend(void *handle)
+{
+    int r;
+    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
     /*
      * Proper cleanups before halting the HW engine:
      *   - cancel the delayed idle work
@@ -234,17 +247,6 @@ static int uvd_v5_0_hw_fini(void *handle)
                                                AMD_CG_STATE_GATE);
     }

-    if (RREG32(mmUVD_STATUS) != 0)
-        uvd_v5_0_stop(adev);
-
-    return 0;
-}
-
-static int uvd_v5_0_suspend(void *handle)
-{
-    int r;
-    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
     r = uvd_v5_0_hw_fini(adev);
     if (r)
         return r;


@@ -332,15 +332,9 @@ err:
 static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
     struct dma_fence *fence = NULL;
-    struct amdgpu_bo *bo = NULL;
+    struct amdgpu_bo *bo = ring->adev->uvd.ib_bo;
     long r;

-    r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
-                                  AMDGPU_GEM_DOMAIN_VRAM,
-                                  &bo, NULL, NULL);
-    if (r)
-        return r;
-
     r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
     if (r)
         goto error;
@@ -357,9 +351,6 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)

 error:
     dma_fence_put(fence);
-    amdgpu_bo_unpin(bo);
-    amdgpu_bo_unreserve(bo);
-    amdgpu_bo_unref(&bo);
     return r;
 }


@@ -338,15 +338,9 @@ err:
 static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
     struct dma_fence *fence = NULL;
-    struct amdgpu_bo *bo = NULL;
+    struct amdgpu_bo *bo = ring->adev->uvd.ib_bo;
     long r;

-    r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
-                                  AMDGPU_GEM_DOMAIN_VRAM,
-                                  &bo, NULL, NULL);
-    if (r)
-        return r;
-
     r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
     if (r)
         goto error;
@@ -363,9 +357,6 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)

 error:
     dma_fence_put(fence);
-    amdgpu_bo_unpin(bo);
-    amdgpu_bo_unreserve(bo);
-    amdgpu_bo_unref(&bo);
     return r;
 }

@@ -606,6 +597,23 @@ static int uvd_v7_0_hw_fini(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;

+    cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+    if (!amdgpu_sriov_vf(adev))
+        uvd_v7_0_stop(adev);
+    else {
+        /* full access mode, so don't touch any UVD register */
+        DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
+    }
+
+    return 0;
+}
+
+static int uvd_v7_0_suspend(void *handle)
+{
+    int r;
+    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
     /*
      * Proper cleanups before halting the HW engine:
      *   - cancel the delayed idle work
@@ -630,21 +638,6 @@ static int uvd_v7_0_hw_fini(void *handle)
                                                AMD_CG_STATE_GATE);
     }

-    if (!amdgpu_sriov_vf(adev))
-        uvd_v7_0_stop(adev);
-    else {
-        /* full access mode, so don't touch any UVD register */
-        DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
-    }
-
-    return 0;
-}
-
-static int uvd_v7_0_suspend(void *handle)
-{
-    int r;
-    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
     r = uvd_v7_0_hw_fini(adev);
     if (r)
         return r;


@@ -431,10 +431,12 @@ static int vce_v2_0_sw_init(void *handle)
         return r;

     for (i = 0; i < adev->vce.num_rings; i++) {
+        enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);
+
         ring = &adev->vce.ring[i];
         sprintf(ring->name, "vce%d", i);
         r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
-                             AMDGPU_RING_PRIO_DEFAULT, NULL);
+                             hw_prio, NULL);
         if (r)
             return r;
     }
@@ -479,6 +481,17 @@ static int vce_v2_0_hw_fini(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;

+    cancel_delayed_work_sync(&adev->vce.idle_work);
+
+    return 0;
+}
+
+static int vce_v2_0_suspend(void *handle)
+{
+    int r;
+    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
     /*
      * Proper cleanups before halting the HW engine:
      *   - cancel the delayed idle work
@@ -502,14 +515,6 @@ static int vce_v2_0_hw_fini(void *handle)
                                                AMD_CG_STATE_GATE);
     }

-    return 0;
-}
-
-static int vce_v2_0_suspend(void *handle)
-{
-    int r;
-    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
     r = vce_v2_0_hw_fini(adev);
     if (r)
         return r;


@@ -440,10 +440,12 @@ static int vce_v3_0_sw_init(void *handle)
         return r;

     for (i = 0; i < adev->vce.num_rings; i++) {
+        enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);
+
         ring = &adev->vce.ring[i];
         sprintf(ring->name, "vce%d", i);
         r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
-                             AMDGPU_RING_PRIO_DEFAULT, NULL);
+                             hw_prio, NULL);
         if (r)
             return r;
     }
@@ -490,6 +492,21 @@ static int vce_v3_0_hw_fini(void *handle)
     int r;
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;

+    cancel_delayed_work_sync(&adev->vce.idle_work);
+
+    r = vce_v3_0_wait_for_idle(handle);
+    if (r)
+        return r;
+
+    vce_v3_0_stop(adev);
+    return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
+}
+
+static int vce_v3_0_suspend(void *handle)
+{
+    int r;
+    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
     /*
      * Proper cleanups before halting the HW engine:
      *   - cancel the delayed idle work
@@ -513,19 +530,6 @@ static int vce_v3_0_hw_fini(void *handle)
                                                AMD_CG_STATE_GATE);
     }

-    r = vce_v3_0_wait_for_idle(handle);
-    if (r)
-        return r;
-
-    vce_v3_0_stop(adev);
-    return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
-}
-
-static int vce_v3_0_suspend(void *handle)
-{
-    int r;
-    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
     r = vce_v3_0_hw_fini(adev);
     if (r)
         return r;


@@ -463,6 +463,8 @@ static int vce_v4_0_sw_init(void *handle)
     }

     for (i = 0; i < adev->vce.num_rings; i++) {
+        enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);
+
         ring = &adev->vce.ring[i];
         sprintf(ring->name, "vce%d", i);
         if (amdgpu_sriov_vf(adev)) {
@@ -478,7 +480,7 @@ static int vce_v4_0_sw_init(void *handle)
                 ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1;
         }
         r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
-                             AMDGPU_RING_PRIO_DEFAULT, NULL);
+                             hw_prio, NULL);
         if (r)
             return r;
     }
@@ -542,29 +544,8 @@ static int vce_v4_0_hw_fini(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;

-    /*
-     * Proper cleanups before halting the HW engine:
-     *   - cancel the delayed idle work
-     *   - enable powergating
-     *   - enable clockgating
-     *   - disable dpm
-     *
-     * TODO: to align with the VCN implementation, move the
-     * jobs for clockgating/powergating/dpm setting to
-     * ->set_powergating_state().
-     */
     cancel_delayed_work_sync(&adev->vce.idle_work);

-    if (adev->pm.dpm_enabled) {
-        amdgpu_dpm_enable_vce(adev, false);
-    } else {
-        amdgpu_asic_set_vce_clocks(adev, 0, 0);
-        amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-                                               AMD_PG_STATE_GATE);
-        amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-                                               AMD_CG_STATE_GATE);
-    }
-
     if (!amdgpu_sriov_vf(adev)) {
         /* vce_v4_0_wait_for_idle(handle); */
         vce_v4_0_stop(adev);
@@ -594,6 +575,29 @@ static int vce_v4_0_suspend(void *handle)
         drm_dev_exit(idx);
     }

+    /*
+     * Proper cleanups before halting the HW engine:
+     *   - cancel the delayed idle work
+     *   - enable powergating
+     *   - enable clockgating
+     *   - disable dpm
+     *
+     * TODO: to align with the VCN implementation, move the
+     * jobs for clockgating/powergating/dpm setting to
+     * ->set_powergating_state().
+     */
+    cancel_delayed_work_sync(&adev->vce.idle_work);
+
+    if (adev->pm.dpm_enabled) {
+        amdgpu_dpm_enable_vce(adev, false);
+    } else {
+        amdgpu_asic_set_vce_clocks(adev, 0, 0);
+        amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                                               AMD_PG_STATE_GATE);
+        amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                                               AMD_CG_STATE_GATE);
+    }
+
     r = vce_v4_0_hw_fini(adev);
     if (r)
         return r;


@@ -145,10 +145,12 @@ static int vcn_v1_0_sw_init(void *handle)
         SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);

     for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+        enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
+
         ring = &adev->vcn.inst->ring_enc[i];
         sprintf(ring->name, "vcn_enc%d", i);
         r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
-                             AMDGPU_RING_PRIO_DEFAULT, NULL);
+                             hw_prio, NULL);
         if (r)
             return r;
     }


@@ -22,6 +22,7 @@
  */

 #include <linux/firmware.h>
+#include <drm/drm_drv.h>

 #include "amdgpu.h"
 #include "amdgpu_vcn.h"
@@ -159,6 +160,8 @@ static int vcn_v2_0_sw_init(void *handle)
     adev->vcn.inst->external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);

     for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+        enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
+
         ring = &adev->vcn.inst->ring_enc[i];
         ring->use_doorbell = true;
         if (!amdgpu_sriov_vf(adev))
@@ -167,7 +170,7 @@ static int vcn_v2_0_sw_init(void *handle)
             ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
         sprintf(ring->name, "vcn_enc%d", i);
         r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
-                             AMDGPU_RING_PRIO_DEFAULT, NULL);
+                             hw_prio, NULL);
         if (r)
             return r;
     }
@@ -192,11 +195,14 @@ static int vcn_v2_0_sw_init(void *handle)
  */
 static int vcn_v2_0_sw_fini(void *handle)
 {
-    int r;
+    int r, idx;
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;

-    fw_shared->present_flag_0 = 0;
+    if (drm_dev_enter(&adev->ddev, &idx)) {
+        fw_shared->present_flag_0 = 0;
+        drm_dev_exit(idx);
+    }

     amdgpu_virt_free_mm_table(adev);


@@ -22,6 +22,7 @@
  */

 #include <linux/firmware.h>
+#include <drm/drm_drv.h>

 #include "amdgpu.h"
 #include "amdgpu_vcn.h"
@@ -194,6 +195,8 @@ static int vcn_v2_5_sw_init(void *handle)
             return r;

         for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+            enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
+
             ring = &adev->vcn.inst[j].ring_enc[i];
             ring->use_doorbell = true;
@@ -203,7 +206,7 @@ static int vcn_v2_5_sw_init(void *handle)
             sprintf(ring->name, "vcn_enc_%d.%d", j, i);
             r = amdgpu_ring_init(adev, ring, 512,
                                  &adev->vcn.inst[j].irq, 0,
-                                 AMDGPU_RING_PRIO_DEFAULT, NULL);
+                                 hw_prio, NULL);
             if (r)
                 return r;
         }
@@ -233,17 +236,21 @@ static int vcn_v2_5_sw_init(void *handle)
  */
 static int vcn_v2_5_sw_fini(void *handle)
 {
-    int i, r;
+    int i, r, idx;
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     volatile struct amdgpu_fw_shared *fw_shared;

-    for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
-        if (adev->vcn.harvest_config & (1 << i))
-            continue;
-        fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
-        fw_shared->present_flag_0 = 0;
+    if (drm_dev_enter(&adev->ddev, &idx)) {
+        for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+            if (adev->vcn.harvest_config & (1 << i))
+                continue;
+            fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
+            fw_shared->present_flag_0 = 0;
+        }
+        drm_dev_exit(idx);
     }

     if (amdgpu_sriov_vf(adev))
         amdgpu_virt_free_mm_table(adev);


@@ -224,6 +224,8 @@ static int vcn_v3_0_sw_init(void *handle)
             return r;

         for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+            enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(j);
+
             /* VCN ENC TRAP */
             r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
                 j + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
@@ -239,8 +241,7 @@ static int vcn_v3_0_sw_init(void *handle)
             }
             sprintf(ring->name, "vcn_enc_%d.%d", i, j);
             r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
-                                 AMDGPU_RING_PRIO_DEFAULT,
-                                 &adev->vcn.inst[i].sched_score);
+                                 hw_prio, &adev->vcn.inst[i].sched_score);
             if (r)
                 return r;
         }


@@ -971,7 +971,6 @@ out:
 void kgd2kfd_device_exit(struct kfd_dev *kfd)
 {
     if (kfd->init_complete) {
-        svm_migrate_fini((struct amdgpu_device *)kfd->kgd);
         device_queue_manager_uninit(kfd->dqm);
         kfd_interrupt_exit(kfd);
         kfd_topology_remove_device(kfd);


@@ -891,9 +891,16 @@ int svm_migrate_init(struct amdgpu_device *adev)
     pgmap->ops = &svm_migrate_pgmap_ops;
     pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
     pgmap->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
+
+    /* Device manager releases device-specific resources, memory region and
+     * pgmap when driver disconnects from device.
+     */
     r = devm_memremap_pages(adev->dev, pgmap);
     if (IS_ERR(r)) {
         pr_err("failed to register HMM device memory\n");
+
+        /* Disable SVM support capability */
+        pgmap->type = 0;
         devm_release_mem_region(adev->dev, res->start,
                                 res->end - res->start + 1);
         return PTR_ERR(r);
@@ -908,12 +915,3 @@ int svm_migrate_init(struct amdgpu_device *adev)

     return 0;
 }
-
-void svm_migrate_fini(struct amdgpu_device *adev)
-{
-    struct dev_pagemap *pgmap = &adev->kfd.dev->pgmap;
-
-    devm_memunmap_pages(adev->dev, pgmap);
-    devm_release_mem_region(adev->dev, pgmap->range.start,
-                            pgmap->range.end - pgmap->range.start + 1);
-}
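svm_migrate_fini() could be deleted because devm_memremap_pages() is device-managed: the devres core unmaps the pages and releases the region when the device detaches, so an explicit mirror call only risked a double release on hotplug paths. The registration side, reduced to its shape:

void *p = devm_memremap_pages(adev->dev, pgmap);
if (IS_ERR(p)) {
    /* nothing stays registered on failure; clearing pgmap->type also
     * marks SVM support as unavailable to later checks */
    pgmap->type = 0;
    return PTR_ERR(p);
}
/* no matching devm_memunmap_pages() call is needed at teardown */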


@@ -47,7 +47,6 @@ unsigned long
 svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr);

 int svm_migrate_init(struct amdgpu_device *adev);
-void svm_migrate_fini(struct amdgpu_device *adev);

 #else

@@ -55,10 +54,6 @@ static inline int svm_migrate_init(struct amdgpu_device *adev)
 {
     return 0;
 }
-static inline void svm_migrate_fini(struct amdgpu_device *adev)
-{
-    /* empty */
-}

 #endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */


@ -118,6 +118,13 @@ static void svm_range_remove_notifier(struct svm_range *prange)
mmu_interval_notifier_remove(&prange->notifier); mmu_interval_notifier_remove(&prange->notifier);
} }
static bool
svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
{
return dma_addr && !dma_mapping_error(dev, dma_addr) &&
!(dma_addr & SVM_RANGE_VRAM_DOMAIN);
}
static int static int
svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange, svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
unsigned long offset, unsigned long npages, unsigned long offset, unsigned long npages,
@ -139,8 +146,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
addr += offset; addr += offset;
for (i = 0; i < npages; i++) { for (i = 0; i < npages; i++) {
if (WARN_ONCE(addr[i] && !dma_mapping_error(dev, addr[i]), if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
"leaking dma mapping\n"))
dma_unmap_page(dev, addr[i], PAGE_SIZE, dir); dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
page = hmm_pfn_to_page(hmm_pfns[i]); page = hmm_pfn_to_page(hmm_pfns[i]);
@ -209,7 +215,7 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
return; return;
for (i = offset; i < offset + npages; i++) { for (i = offset; i < offset + npages; i++) {
if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i])) if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
continue; continue;
pr_debug("dma unmapping 0x%llx\n", dma_addr[i] >> PAGE_SHIFT); pr_debug("dma unmapping 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir); dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
@ -1165,7 +1171,7 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned long last_start; unsigned long last_start;
int last_domain; int last_domain;
int r = 0; int r = 0;
int64_t i; int64_t i, j;
last_start = prange->start + offset; last_start = prange->start + offset;
@ -1178,7 +1184,11 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
for (i = offset; i < offset + npages; i++) { for (i = offset; i < offset + npages; i++) {
last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN; last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN; dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
if ((prange->start + i) < prange->last &&
/* Collect all pages in the same address range and memory domain
* that can be mapped with a single call to update mapping.
*/
if (i < offset + npages - 1 &&
last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN)) last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
continue; continue;
@ -1201,6 +1211,10 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
NULL, dma_addr, NULL, dma_addr,
&vm->last_update, &vm->last_update,
&table_freed); &table_freed);
for (j = last_start - prange->start; j <= i; j++)
dma_addr[j] |= last_domain;
if (r) { if (r) {
pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start); pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
goto out; goto out;
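The coalescing logic above can be read as a run-length walk over a tagged address array; the following standalone sketch (illustrative names, not driver code) shows the same idea of emitting one mapping call per run and re-applying the domain bit that was stripped for mapping:

/* Illustrative sketch of the run coalescing above. */
static void example_coalesce(uint64_t *addr, int n, uint64_t domain_bit)
{
	int i, start = 0;

	for (i = 0; i < n; i++) {
		uint64_t domain = addr[i] & domain_bit;

		addr[i] &= ~domain_bit;
		/* extend the run while the next entry shares the tag */
		if (i < n - 1 && domain == (addr[i + 1] & domain_bit))
			continue;

		/* one update-mapping call would cover [start, i] here */
		while (start <= i)
			addr[start++] |= domain;
	}
}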

View File

@ -215,6 +215,8 @@ static void handle_cursor_update(struct drm_plane *plane,
static const struct drm_format_info * static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd); amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static bool static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
struct drm_crtc_state *new_crtc_state); struct drm_crtc_state *new_crtc_state);
@ -618,6 +620,116 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
} }
#endif #endif
/**
* dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
* @adev: amdgpu_device pointer
* @notify: dmub notification structure
*
* Dmub AUX or SET_CONFIG command completion processing callback
* Copies the dmub notification to the DM, to be read by the AUX command
* issuing thread, and also signals the event to wake up that thread.
*/
void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
if (adev->dm.dmub_notify)
memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
complete(&adev->dm.dmub_aux_transfer_done);
}
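For context, here is a hypothetical sketch of the waiting side of this handshake; the helper name and the 10 ms timeout are invented, but blocking on the dmub_aux_transfer_done completion that the callback above signals is the mechanism in play:

/* Hypothetical sketch only -- not part of this change. */
static int example_wait_for_aux_reply(struct amdgpu_device *adev,
				      struct dmub_notification *reply)
{
	/* the 10 ms timeout is an arbitrary illustration value */
	if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done,
					 msecs_to_jiffies(10)))
		return -ETIMEDOUT;

	/* dmub_aux_setconfig_callback() filled adev->dm.dmub_notify */
	memcpy(reply, adev->dm.dmub_notify, sizeof(*reply));
	return 0;
}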
/**
* dmub_hpd_callback - DMUB HPD interrupt processing callback.
* @adev: amdgpu_device pointer
* @notify: dmub notification structure
*
* Dmub Hpd interrupt processing callback. Gets the display index through the
* link index and calls the helper to do the processing.
*/
void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct dc_link *link;
uint8_t link_index = 0;
struct drm_device *dev = adev->dm.ddev;
if (adev == NULL)
return;
if (notify == NULL) {
DRM_ERROR("DMUB HPD callback notification was NULL");
return;
}
if (notify->link_index > adev->dm.dc->link_count) {
DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index);
return;
}
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
link_index = notify->link_index;
link = adev->dm.dc->links[link_index];
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
if (link && aconnector->dc_link == link) {
DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
handle_hpd_irq_helper(aconnector);
break;
}
}
drm_connector_list_iter_end(&iter);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
/**
* register_dmub_notify_callback - Sets callback for DMUB notify
* @adev: amdgpu_device pointer
* @type: Type of dmub notification
* @callback: Dmub interrupt callback function
* @dmub_int_thread_offload: offload indicator
*
* API to register a dmub callback handler for a dmub notification.
* Also sets an indicator for whether the callback processing is to be
* offloaded to the dmub interrupt handling thread.
* Return: true if successfully registered, false otherwise
*/
bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
{
if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
adev->dm.dmub_callback[type] = callback;
adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
} else
return false;
return true;
}
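A registration call then looks like the following sketch; the callback name here is hypothetical, and the real registrations appear in amdgpu_dm_init() further down:

/* Hypothetical usage -- mirrors the registrations in amdgpu_dm_init(). */
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
				   example_aux_callback, false))
	DRM_ERROR("example: dmub callback registration failed");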
static void dm_handle_hpd_work(struct work_struct *work)
{
struct dmub_hpd_work *dmub_hpd_wrk;
dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
if (!dmub_hpd_wrk->dmub_notify) {
DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
return;
}
if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
dmub_hpd_wrk->dmub_notify);
}
kfree(dmub_hpd_wrk);
}
#define DMUB_TRACE_MAX_READ 64 #define DMUB_TRACE_MAX_READ 64
/** /**
* dm_dmub_outbox1_low_irq() - Handles Outbox interrupt * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
@ -634,18 +746,33 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
struct amdgpu_display_manager *dm = &adev->dm; struct amdgpu_display_manager *dm = &adev->dm;
struct dmcub_trace_buf_entry entry = { 0 }; struct dmcub_trace_buf_entry entry = { 0 };
uint32_t count = 0; uint32_t count = 0;
struct dmub_hpd_work *dmub_hpd_wrk;
if (dc_enable_dmub_notifications(adev->dm.dc)) { if (dc_enable_dmub_notifications(adev->dm.dc)) {
dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
if (!dmub_hpd_wrk) {
DRM_ERROR("Failed to allocate dmub_hpd_wrk");
return;
}
INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) { if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
do { do {
dc_stat_get_dmub_notification(adev->dm.dc, &notify); dc_stat_get_dmub_notification(adev->dm.dc, &notify);
} while (notify.pending_notification); if (notify.type > ARRAY_SIZE(dm->dmub_thread_offload)) {
DRM_ERROR("DM: notify type %d larger than the array size %zu!", notify.type,
ARRAY_SIZE(dm->dmub_thread_offload));
continue;
}
if (dm->dmub_thread_offload[notify.type] == true) {
dmub_hpd_wrk->dmub_notify = &notify;
dmub_hpd_wrk->adev = adev;
queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
} else {
dm->dmub_callback[notify.type](adev, &notify);
}
if (adev->dm.dmub_notify) } while (notify.pending_notification);
memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
complete(&adev->dm.dmub_aux_transfer_done);
// TODO : HPD Implementation
} else { } else {
DRM_ERROR("DM: Failed to receive correct outbox IRQ !"); DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
@ -1083,6 +1210,83 @@ static void vblank_control_worker(struct work_struct *work)
} }
#endif #endif
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
struct hpd_rx_irq_offload_work *offload_work;
struct amdgpu_dm_connector *aconnector;
struct dc_link *dc_link;
struct amdgpu_device *adev;
enum dc_connection_type new_connection_type = dc_connection_none;
unsigned long flags;
offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
aconnector = offload_work->offload_wq->aconnector;
if (!aconnector) {
DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
goto skip;
}
adev = drm_to_adev(aconnector->base.dev);
dc_link = aconnector->dc_link;
mutex_lock(&aconnector->hpd_lock);
if (!dc_link_detect_sink(dc_link, &new_connection_type))
DRM_ERROR("KMS: Failed to detect connector\n");
mutex_unlock(&aconnector->hpd_lock);
if (new_connection_type == dc_connection_none)
goto skip;
if (amdgpu_in_reset(adev))
goto skip;
mutex_lock(&adev->dm.dc_lock);
if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
dc_link_dp_handle_automated_test(dc_link);
else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
dc_link_dp_allow_hpd_rx_irq(dc_link)) {
dc_link_dp_handle_link_loss(dc_link);
spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
offload_work->offload_wq->is_handling_link_loss = false;
spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
}
mutex_unlock(&adev->dm.dc_lock);
skip:
kfree(offload_work);
}
static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
int max_caps = dc->caps.max_links;
int i = 0;
struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
if (!hpd_rx_offload_wq)
return NULL;
for (i = 0; i < max_caps; i++) {
hpd_rx_offload_wq[i].wq =
create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
if (hpd_rx_offload_wq[i].wq == NULL) {
DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
return NULL;
}
spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
}
return hpd_rx_offload_wq;
}
static int amdgpu_dm_init(struct amdgpu_device *adev) static int amdgpu_dm_init(struct amdgpu_device *adev)
{ {
struct dc_init_data init_data; struct dc_init_data init_data;
@ -1201,6 +1405,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
dc_hardware_init(adev->dm.dc); dc_hardware_init(adev->dm.dc);
adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
if (!adev->dm.hpd_rx_offload_wq) {
DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
goto error;
}
#if defined(CONFIG_DRM_AMD_DC_DCN) #if defined(CONFIG_DRM_AMD_DC_DCN)
if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) { if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
struct dc_phy_addr_space_config pa_config; struct dc_phy_addr_space_config pa_config;
@ -1253,7 +1463,25 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify"); DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
goto error; goto error;
} }
adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
if (!adev->dm.delayed_hpd_wq) {
DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
goto error;
}
amdgpu_dm_outbox_init(adev); amdgpu_dm_outbox_init(adev);
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
dmub_aux_setconfig_callback, false)) {
DRM_ERROR("amdgpu: fail to register dmub aux callback");
goto error;
}
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
DRM_ERROR("amdgpu: fail to register dmub hpd callback");
goto error;
}
#endif
} }
if (amdgpu_dm_initialize_drm_device(adev)) { if (amdgpu_dm_initialize_drm_device(adev)) {
@ -1335,6 +1563,8 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
if (dc_enable_dmub_notifications(adev->dm.dc)) { if (dc_enable_dmub_notifications(adev->dm.dc)) {
kfree(adev->dm.dmub_notify); kfree(adev->dm.dmub_notify);
adev->dm.dmub_notify = NULL; adev->dm.dmub_notify = NULL;
destroy_workqueue(adev->dm.delayed_hpd_wq);
adev->dm.delayed_hpd_wq = NULL;
} }
if (adev->dm.dmub_bo) if (adev->dm.dmub_bo)
@ -1342,6 +1572,18 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
&adev->dm.dmub_bo_gpu_addr, &adev->dm.dmub_bo_gpu_addr,
&adev->dm.dmub_bo_cpu_addr); &adev->dm.dmub_bo_cpu_addr);
if (adev->dm.hpd_rx_offload_wq) {
for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
if (adev->dm.hpd_rx_offload_wq[i].wq) {
destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
adev->dm.hpd_rx_offload_wq[i].wq = NULL;
}
}
kfree(adev->dm.hpd_rx_offload_wq);
adev->dm.hpd_rx_offload_wq = NULL;
}
/* DC Destroy TODO: Replace destroy DAL */ /* DC Destroy TODO: Replace destroy DAL */
if (adev->dm.dc) if (adev->dm.dc)
dc_destroy(&adev->dm.dc); dc_destroy(&adev->dm.dc);
@ -1978,6 +2220,16 @@ context_alloc_fail:
return res; return res;
} }
static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
{
int i;
if (dm->hpd_rx_offload_wq) {
for (i = 0; i < dm->dc->caps.max_links; i++)
flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
}
}
static int dm_suspend(void *handle) static int dm_suspend(void *handle)
{ {
struct amdgpu_device *adev = handle; struct amdgpu_device *adev = handle;
@ -1999,6 +2251,8 @@ static int dm_suspend(void *handle)
amdgpu_dm_irq_suspend(adev); amdgpu_dm_irq_suspend(adev);
hpd_rx_irq_work_suspend(dm);
return ret; return ret;
} }
@ -2009,6 +2263,8 @@ static int dm_suspend(void *handle)
amdgpu_dm_irq_suspend(adev); amdgpu_dm_irq_suspend(adev);
hpd_rx_irq_work_suspend(dm);
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
return 0; return 0;
@ -2155,7 +2411,7 @@ cleanup:
return; return;
} }
static void dm_set_dpms_off(struct dc_link *link) static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
{ {
struct dc_stream_state *stream_state; struct dc_stream_state *stream_state;
struct amdgpu_dm_connector *aconnector = link->priv; struct amdgpu_dm_connector *aconnector = link->priv;
@ -2176,6 +2432,7 @@ static void dm_set_dpms_off(struct dc_link *link)
} }
stream_update.stream = stream_state; stream_update.stream = stream_state;
acrtc_state->force_dpms_off = true;
dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0, dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
stream_state, &stream_update, stream_state, &stream_update,
stream_state->ctx->dc->current_state); stream_state->ctx->dc->current_state);
@ -2613,20 +2870,22 @@ void amdgpu_dm_update_connector_after_detect(
dc_sink_release(sink); dc_sink_release(sink);
} }
static void handle_hpd_irq(void *param) static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
{ {
struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
struct drm_connector *connector = &aconnector->base; struct drm_connector *connector = &aconnector->base;
struct drm_device *dev = connector->dev; struct drm_device *dev = connector->dev;
enum dc_connection_type new_connection_type = dc_connection_none; enum dc_connection_type new_connection_type = dc_connection_none;
struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_device *adev = drm_to_adev(dev);
#ifdef CONFIG_DRM_AMD_DC_HDCP
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
#endif struct dm_crtc_state *dm_crtc_state = NULL;
if (adev->dm.disable_hpd_irq) if (adev->dm.disable_hpd_irq)
return; return;
if (dm_con_state->base.state && dm_con_state->base.crtc)
dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
dm_con_state->base.state,
dm_con_state->base.crtc));
/* /*
* In case of failure or MST no need to update connector status or notify the OS * In case of failure or MST no need to update connector status or notify the OS
* since (for MST case) MST does this in its own context. * since (for MST case) MST does this in its own context.
@ -2658,8 +2917,9 @@ static void handle_hpd_irq(void *param)
} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) { } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
if (new_connection_type == dc_connection_none && if (new_connection_type == dc_connection_none &&
aconnector->dc_link->type == dc_connection_none) aconnector->dc_link->type == dc_connection_none &&
dm_set_dpms_off(aconnector->dc_link); dm_crtc_state)
dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
amdgpu_dm_update_connector_after_detect(aconnector); amdgpu_dm_update_connector_after_detect(aconnector);
@ -2674,7 +2934,15 @@ static void handle_hpd_irq(void *param)
} }
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector) static void handle_hpd_irq(void *param)
{
struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
handle_hpd_irq_helper(aconnector);
}
static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
{ {
uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
uint8_t dret; uint8_t dret;
@ -2752,6 +3020,25 @@ static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
DRM_DEBUG_DRIVER("Loop exceeded max iterations\n"); DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
} }
static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
union hpd_irq_data hpd_irq_data)
{
struct hpd_rx_irq_offload_work *offload_work =
kzalloc(sizeof(*offload_work), GFP_KERNEL);
if (!offload_work) {
DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
return;
}
INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
offload_work->data = hpd_irq_data;
offload_work->offload_wq = offload_wq;
queue_work(offload_wq->wq, &offload_work->work);
DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
}
static void handle_hpd_rx_irq(void *param) static void handle_hpd_rx_irq(void *param)
{ {
struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
@ -2763,14 +3050,16 @@ static void handle_hpd_rx_irq(void *param)
enum dc_connection_type new_connection_type = dc_connection_none; enum dc_connection_type new_connection_type = dc_connection_none;
struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_device *adev = drm_to_adev(dev);
union hpd_irq_data hpd_irq_data; union hpd_irq_data hpd_irq_data;
bool lock_flag = 0; bool link_loss = false;
bool has_left_work = false;
int idx = aconnector->base.index;
struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
if (adev->dm.disable_hpd_irq) if (adev->dm.disable_hpd_irq)
return; return;
/* /*
* TODO:Temporary add mutex to protect hpd interrupt not have a gpio * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
* conflict, after implement i2c helper, this mutex should be * conflict, after implement i2c helper, this mutex should be
@ -2778,44 +3067,42 @@ static void handle_hpd_rx_irq(void *param)
*/ */
mutex_lock(&aconnector->hpd_lock); mutex_lock(&aconnector->hpd_lock);
read_hpd_rx_irq_data(dc_link, &hpd_irq_data); result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
&link_loss, true, &has_left_work);
if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) || if (!has_left_work)
(dc_link->type == dc_connection_mst_branch)) { goto out;
if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
result = true; if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
dm_handle_hpd_rx_irq(aconnector); schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
goto out;
}
if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
dm_handle_mst_sideband_msg(aconnector);
goto out; goto out;
} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { }
result = false;
dm_handle_hpd_rx_irq(aconnector); if (link_loss) {
bool skip = false;
spin_lock(&offload_wq->offload_lock);
skip = offload_wq->is_handling_link_loss;
if (!skip)
offload_wq->is_handling_link_loss = true;
spin_unlock(&offload_wq->offload_lock);
if (!skip)
schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
goto out; goto out;
} }
} }
/*
* TODO: We need the lock to avoid touching DC state while it's being
* modified during automated compliance testing, or when link loss
* happens. While this should be split into subhandlers and proper
* interfaces to avoid having to conditionally lock like this in the
* outer layer, we need this workaround temporarily to allow MST
* lightup in some scenarios to avoid timeout.
*/
if (!amdgpu_in_reset(adev) &&
(hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
mutex_lock(&adev->dm.dc_lock);
lock_flag = 1;
}
#ifdef CONFIG_DRM_AMD_DC_HDCP
result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
#else
result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
#endif
if (!amdgpu_in_reset(adev) && lock_flag)
mutex_unlock(&adev->dm.dc_lock);
out: out:
if (result && !is_mst_root_connector) { if (result && !is_mst_root_connector) {
/* Downstream Port status changed. */ /* Downstream Port status changed. */
@ -2899,6 +3186,10 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
amdgpu_dm_irq_register_interrupt(adev, &int_params, amdgpu_dm_irq_register_interrupt(adev, &int_params,
handle_hpd_rx_irq, handle_hpd_rx_irq,
(void *) aconnector); (void *) aconnector);
if (adev->dm.hpd_rx_offload_wq)
adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
aconnector;
} }
} }
} }
@ -4664,6 +4955,16 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev,
AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(PACKERS, pkrs) |
AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
add_modifier(mods, size, capacity, AMD_FMT_MOD | add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
@ -4676,6 +4977,17 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev,
AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(PACKERS, pkrs) |
AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_RETILE, 1) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
add_modifier(mods, size, capacity, AMD_FMT_MOD | add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
@ -4761,10 +5073,27 @@ fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
if (modifier_has_dcc(modifier) && !force_disable_dcc) { if (modifier_has_dcc(modifier) && !force_disable_dcc) {
uint64_t dcc_address = afb->address + afb->base.offsets[1]; uint64_t dcc_address = afb->address + afb->base.offsets[1];
bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
dcc->enable = 1; dcc->enable = 1;
dcc->meta_pitch = afb->base.pitches[1]; dcc->meta_pitch = afb->base.pitches[1];
dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier); dcc->independent_64b_blks = independent_64b_blks;
if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
if (independent_64b_blks && independent_128b_blks)
dcc->dcc_ind_blk = hubp_ind_block_64b;
else if (independent_128b_blks)
dcc->dcc_ind_blk = hubp_ind_block_128b;
else if (independent_64b_blks && !independent_128b_blks)
dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
else
dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
} else {
if (independent_64b_blks)
dcc->dcc_ind_blk = hubp_ind_block_64b;
else
dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
}
address->grph.meta_addr.low_part = lower_32_bits(dcc_address); address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
address->grph.meta_addr.high_part = upper_32_bits(dcc_address); address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
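For quick reference, the independent-block selection above reduces to the following truth table (RBPLUS is the gfx10.3 tile version checked in the condition):

64B blks  128B blks  RBPLUS result                  pre-RBPLUS result
   1          1      hubp_ind_block_64b             hubp_ind_block_64b
   0          1      hubp_ind_block_128b            hubp_ind_block_unconstrained
   1          0      hubp_ind_block_64b_no_128bcl   hubp_ind_block_64b
   0          0      hubp_ind_block_unconstrained   hubp_ind_block_unconstrained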
@ -5600,9 +5929,15 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
{ {
struct drm_connector *drm_connector = &aconnector->base; struct drm_connector *drm_connector = &aconnector->base;
uint32_t link_bandwidth_kbps; uint32_t link_bandwidth_kbps;
uint32_t max_dsc_target_bpp_limit_override = 0;
link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
dc_link_get_link_cap(aconnector->dc_link)); dc_link_get_link_cap(aconnector->dc_link));
if (stream->link && stream->link->local_sink)
max_dsc_target_bpp_limit_override =
stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
/* Set DSC policy according to dsc_clock_en */ /* Set DSC policy according to dsc_clock_en */
dc_dsc_policy_set_enable_dsc_when_not_needed( dc_dsc_policy_set_enable_dsc_when_not_needed(
aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE); aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
@ -5612,7 +5947,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
dsc_caps, dsc_caps,
aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override, aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
0, max_dsc_target_bpp_limit_override,
link_bandwidth_kbps, link_bandwidth_kbps,
&stream->timing, &stream->timing,
&stream->timing.dsc_cfg)) { &stream->timing.dsc_cfg)) {
@ -5963,6 +6298,7 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
state->freesync_config = cur->freesync_config; state->freesync_config = cur->freesync_config;
state->cm_has_degamma = cur->cm_has_degamma; state->cm_has_degamma = cur->cm_has_degamma;
state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb; state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
state->force_dpms_off = cur->force_dpms_off;
/* TODO Duplicate dc_stream after objects are stream object is flattened */ /* TODO Duplicate dc_stream after objects are stream object is flattened */
return &state->base; return &state->base;
@ -8679,7 +9015,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* and rely on sending it from software. * and rely on sending it from software.
*/ */
if (acrtc_attach->base.state->event && if (acrtc_attach->base.state->event &&
acrtc_state->active_planes > 0) { acrtc_state->active_planes > 0 &&
!acrtc_state->force_dpms_off) {
drm_crtc_vblank_get(pcrtc); drm_crtc_vblank_get(pcrtc);
spin_lock_irqsave(&pcrtc->dev->event_lock, flags); spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
@ -10819,6 +11156,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
struct amdgpu_dm_connector *amdgpu_dm_connector = struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector); to_amdgpu_dm_connector(connector);
struct dm_connector_state *dm_con_state = NULL; struct dm_connector_state *dm_con_state = NULL;
struct dc_sink *sink;
struct drm_device *dev = connector->dev; struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_device *adev = drm_to_adev(dev);
@ -10830,28 +11168,31 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
goto update; goto update;
} }
if (!edid) { sink = amdgpu_dm_connector->dc_sink ?
amdgpu_dm_connector->dc_sink :
amdgpu_dm_connector->dc_em_sink;
if (!edid || !sink) {
dm_con_state = to_dm_connector_state(connector->state); dm_con_state = to_dm_connector_state(connector->state);
amdgpu_dm_connector->min_vfreq = 0; amdgpu_dm_connector->min_vfreq = 0;
amdgpu_dm_connector->max_vfreq = 0; amdgpu_dm_connector->max_vfreq = 0;
amdgpu_dm_connector->pixel_clock_mhz = 0; amdgpu_dm_connector->pixel_clock_mhz = 0;
connector->display_info.monitor_range.min_vfreq = 0;
connector->display_info.monitor_range.max_vfreq = 0;
freesync_capable = false;
goto update; goto update;
} }
dm_con_state = to_dm_connector_state(connector->state); dm_con_state = to_dm_connector_state(connector->state);
if (!amdgpu_dm_connector->dc_sink) {
DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
goto update;
}
if (!adev->dm.freesync_module) if (!adev->dm.freesync_module)
goto update; goto update;
if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) { || sink->sink_signal == SIGNAL_TYPE_EDP) {
bool edid_check_required = false; bool edid_check_required = false;
if (edid) { if (edid) {
@ -10898,7 +11239,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
freesync_capable = true; freesync_capable = true;
} }
} }
} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
if (i >= 0 && vsdb_info.freesync_supported) { if (i >= 0 && vsdb_info.freesync_supported) {
timing = &edid->detailed_timings[i]; timing = &edid->detailed_timings[i];

View File

@ -47,6 +47,8 @@
#define AMDGPU_DM_MAX_CRTC 6 #define AMDGPU_DM_MAX_CRTC 6
#define AMDGPU_DM_MAX_NUM_EDP 2 #define AMDGPU_DM_MAX_NUM_EDP 2
#define AMDGPU_DMUB_NOTIFICATION_MAX 5
/* /*
#include "include/amdgpu_dal_power_if.h" #include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h" #include "amdgpu_dm_irq.h"
@ -86,6 +88,21 @@ struct dm_compressor_info {
uint64_t gpu_addr; uint64_t gpu_addr;
}; };
typedef void (*dmub_notify_interrupt_callback_t)(struct amdgpu_device *adev, struct dmub_notification *notify);
/**
* struct dmub_hpd_work - Handle time-consuming work in low priority outbox IRQ
*
* @handle_hpd_work: Work to be executed in a separate thread to handle hpd_low_irq
* @dmub_notify: notification for callback function
* @adev: amdgpu_device pointer
*/
struct dmub_hpd_work {
struct work_struct handle_hpd_work;
struct dmub_notification *dmub_notify;
struct amdgpu_device *adev;
};
/** /**
* struct vblank_control_work - Work data for vblank control * struct vblank_control_work - Work data for vblank control
* @work: Kernel work data for the work event * @work: Kernel work data for the work event
@ -154,6 +171,48 @@ struct dal_allocation {
u64 gpu_addr; u64 gpu_addr;
}; };
/**
* struct hpd_rx_irq_offload_work_queue - Work queue to handle hpd_rx_irq
* offload work
*/
struct hpd_rx_irq_offload_work_queue {
/**
* @wq: workqueue structure to queue offload work.
*/
struct workqueue_struct *wq;
/**
* @offload_lock: To protect fields of offload work queue.
*/
spinlock_t offload_lock;
/**
* @is_handling_link_loss: Used to prevent inserting a link loss event while
* we're already handling link loss
*/
bool is_handling_link_loss;
/**
* @aconnector: The aconnector that this work queue is attached to
*/
struct amdgpu_dm_connector *aconnector;
};
/**
* struct hpd_rx_irq_offload_work - hpd_rx_irq offload work structure
*/
struct hpd_rx_irq_offload_work {
/**
* @work: offload work
*/
struct work_struct work;
/**
* @data: reference irq data which is used while handling offload work
*/
union hpd_irq_data data;
/**
* @offload_wq: offload work queue that this work is queued to
*/
struct hpd_rx_irq_offload_work_queue *offload_wq;
};
/** /**
* struct amdgpu_display_manager - Central amdgpu display manager device * struct amdgpu_display_manager - Central amdgpu display manager device
* *
@ -190,8 +249,30 @@ struct amdgpu_display_manager {
*/ */
struct dmub_srv *dmub_srv; struct dmub_srv *dmub_srv;
/**
* @dmub_notify:
*
* Notification from DMUB.
*/
struct dmub_notification *dmub_notify; struct dmub_notification *dmub_notify;
/**
* @dmub_callback:
*
* Callback functions to handle notification from DMUB.
*/
dmub_notify_interrupt_callback_t dmub_callback[AMDGPU_DMUB_NOTIFICATION_MAX];
/**
* @dmub_thread_offload:
*
* Flag to indicate if the callback processing is offloaded.
*/
bool dmub_thread_offload[AMDGPU_DMUB_NOTIFICATION_MAX];
/** /**
* @dmub_fb_info: * @dmub_fb_info:
* *
@ -422,7 +503,12 @@ struct amdgpu_display_manager {
*/ */
struct crc_rd_work *crc_rd_wrk; struct crc_rd_work *crc_rd_wrk;
#endif #endif
/**
* @hpd_rx_offload_wq:
*
* Work queues to offload the work items of hpd_rx_irq
*/
struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq;
/** /**
* @mst_encoders: * @mst_encoders:
* *
@ -439,6 +525,7 @@ struct amdgpu_display_manager {
*/ */
struct list_head da_list; struct list_head da_list;
struct completion dmub_aux_transfer_done; struct completion dmub_aux_transfer_done;
struct workqueue_struct *delayed_hpd_wq;
/** /**
* @brightness: * @brightness:
@ -542,6 +629,8 @@ struct dm_crtc_state {
bool dsc_force_changed; bool dsc_force_changed;
bool vrr_supported; bool vrr_supported;
bool force_dpms_off;
struct mod_freesync_config freesync_config; struct mod_freesync_config freesync_config;
struct dc_info_packet vrr_infopacket; struct dc_info_packet vrr_infopacket;

View File

@ -247,6 +247,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
{ {
struct amdgpu_dm_connector *connector = file_inode(f)->i_private; struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
struct dc_link *link = connector->dc_link; struct dc_link *link = connector->dc_link;
struct dc *dc = (struct dc *)link->dc;
struct dc_link_settings prefer_link_settings; struct dc_link_settings prefer_link_settings;
char *wr_buf = NULL; char *wr_buf = NULL;
const uint32_t wr_buf_size = 40; const uint32_t wr_buf_size = 40;
@ -313,7 +314,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
prefer_link_settings.lane_count = param[0]; prefer_link_settings.lane_count = param[0];
prefer_link_settings.link_rate = param[1]; prefer_link_settings.link_rate = param[1];
dp_retrain_link_dp_test(link, &prefer_link_settings, false); dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, true);
kfree(wr_buf); kfree(wr_buf);
return size; return size;

View File

@ -448,6 +448,8 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
struct mod_hdcp_display *display = &hdcp_work[link_index].display; struct mod_hdcp_display *display = &hdcp_work[link_index].display;
struct mod_hdcp_link *link = &hdcp_work[link_index].link; struct mod_hdcp_link *link = &hdcp_work[link_index].link;
struct drm_connector_state *conn_state; struct drm_connector_state *conn_state;
struct dc_sink *sink = NULL;
bool link_is_hdcp14 = false;
if (config->dpms_off) { if (config->dpms_off) {
hdcp_remove_display(hdcp_work, link_index, aconnector); hdcp_remove_display(hdcp_work, link_index, aconnector);
@ -460,8 +462,13 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
display->index = aconnector->base.index; display->index = aconnector->base.index;
display->state = MOD_HDCP_DISPLAY_ACTIVE; display->state = MOD_HDCP_DISPLAY_ACTIVE;
if (aconnector->dc_sink != NULL) if (aconnector->dc_sink)
link->mode = mod_hdcp_signal_type_to_operation_mode(aconnector->dc_sink->sink_signal); sink = aconnector->dc_sink;
else if (aconnector->dc_em_sink)
sink = aconnector->dc_em_sink;
if (sink != NULL)
link->mode = mod_hdcp_signal_type_to_operation_mode(sink->sink_signal);
display->controller = CONTROLLER_ID_D0 + config->otg_inst; display->controller = CONTROLLER_ID_D0 + config->otg_inst;
display->dig_fe = config->dig_fe; display->dig_fe = config->dig_fe;
@ -470,8 +477,9 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
display->stream_enc_idx = config->stream_enc_idx; display->stream_enc_idx = config->stream_enc_idx;
link->link_enc_idx = config->link_enc_idx; link->link_enc_idx = config->link_enc_idx;
link->phy_idx = config->phy_idx; link->phy_idx = config->phy_idx;
link->hdcp_supported_informational = dc_link_is_hdcp14(aconnector->dc_link, if (sink)
aconnector->dc_sink->sink_signal) ? 1 : 0; link_is_hdcp14 = dc_link_is_hdcp14(aconnector->dc_link, sink->sink_signal);
link->hdcp_supported_informational = link_is_hdcp14;
link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw; link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
link->dp.assr_enabled = config->assr_enabled; link->dp.assr_enabled = config->assr_enabled;
link->dp.mst_enabled = config->mst_enabled; link->dp.mst_enabled = config->mst_enabled;

View File

@ -40,6 +40,39 @@
#include "dm_helpers.h" #include "dm_helpers.h"
struct monitor_patch_info {
unsigned int manufacturer_id;
unsigned int product_id;
void (*patch_func)(struct dc_edid_caps *edid_caps, unsigned int param);
unsigned int patch_param;
};
static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param);
static const struct monitor_patch_info monitor_patch_table[] = {
{0x6D1E, 0x5BBF, set_max_dsc_bpp_limit, 15},
{0x6D1E, 0x5B9A, set_max_dsc_bpp_limit, 15},
};
static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param)
{
if (edid_caps)
edid_caps->panel_patch.max_dsc_target_bpp_limit = param;
}
static int amdgpu_dm_patch_edid_caps(struct dc_edid_caps *edid_caps)
{
int i, ret = 0;
for (i = 0; i < ARRAY_SIZE(monitor_patch_table); i++)
if ((edid_caps->manufacturer_id == monitor_patch_table[i].manufacturer_id)
&& (edid_caps->product_id == monitor_patch_table[i].product_id)) {
monitor_patch_table[i].patch_func(edid_caps, monitor_patch_table[i].patch_param);
ret++;
}
return ret;
}
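Extending the quirk table is a one-line affair; a hypothetical entry (the manufacturer and product IDs below are invented) would look like:

/* Hypothetical entry -- IDs are made up, 12 bpp chosen for illustration. */
{0x1234, 0xABCD, set_max_dsc_bpp_limit, 12},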
/* dm_helpers_parse_edid_caps /* dm_helpers_parse_edid_caps
* *
* Parse edid caps * Parse edid caps
@ -125,6 +158,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
kfree(sads); kfree(sads);
kfree(sadb); kfree(sadb);
amdgpu_dm_patch_edid_caps(edid_caps);
return result; return result;
} }
@ -751,3 +786,17 @@ void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream)
&new_downspread.raw, &new_downspread.raw,
sizeof(new_downspread)); sizeof(new_downspread));
} }
#if defined(CONFIG_DRM_AMD_DC_DCN)
void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
{
// FPGA programming for this clock is done in the diags framework and
// needs to go through the dm layer, therefore leave a dummy interface here
}
void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable)
{
/* TODO: add periodic detection implementation */
}
#endif

View File

@ -542,7 +542,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
params[i].sink->ctx->dc->res_pool->dscs[0], params[i].sink->ctx->dc->res_pool->dscs[0],
&params[i].sink->dsc_caps.dsc_dec_caps, &params[i].sink->dsc_caps.dsc_dec_caps,
params[i].sink->ctx->dc->debug.dsc_min_slice_height_override, params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
0, params[i].sink->edid_caps.panel_patch.max_dsc_target_bpp_limit,
0, 0,
params[i].timing, params[i].timing,
&params[i].timing->dsc_cfg)) { &params[i].timing->dsc_cfg)) {
@ -574,7 +574,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
param.sink->ctx->dc->res_pool->dscs[0], param.sink->ctx->dc->res_pool->dscs[0],
&param.sink->dsc_caps.dsc_dec_caps, &param.sink->dsc_caps.dsc_dec_caps,
param.sink->ctx->dc->debug.dsc_min_slice_height_override, param.sink->ctx->dc->debug.dsc_min_slice_height_override,
0, param.sink->edid_caps.panel_patch.max_dsc_target_bpp_limit,
(int) kbps, param.timing, &dsc_config); (int) kbps, param.timing, &dsc_config);
return dsc_config.bits_per_pixel; return dsc_config.bits_per_pixel;

View File

@ -1604,6 +1604,16 @@ static enum bp_result bios_parser_get_encoder_cap_info(
ATOM_ENCODER_CAP_RECORD_HBR3_EN) ? 1 : 0; ATOM_ENCODER_CAP_RECORD_HBR3_EN) ? 1 : 0;
info->HDMI_6GB_EN = (record->encodercaps & info->HDMI_6GB_EN = (record->encodercaps &
ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN) ? 1 : 0; ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN) ? 1 : 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
info->IS_DP2_CAPABLE = (record->encodercaps &
ATOM_ENCODER_CAP_RECORD_DP2) ? 1 : 0;
info->DP_UHBR10_EN = (record->encodercaps &
ATOM_ENCODER_CAP_RECORD_UHBR10_EN) ? 1 : 0;
info->DP_UHBR13_5_EN = (record->encodercaps &
ATOM_ENCODER_CAP_RECORD_UHBR13_5_EN) ? 1 : 0;
info->DP_UHBR20_EN = (record->encodercaps &
ATOM_ENCODER_CAP_RECORD_UHBR20_EN) ? 1 : 0;
#endif
info->DP_IS_USB_C = (record->encodercaps & info->DP_IS_USB_C = (record->encodercaps &
ATOM_ENCODER_CAP_RECORD_USB_C_TYPE) ? 1 : 0; ATOM_ENCODER_CAP_RECORD_USB_C_TYPE) ? 1 : 0;

View File

@ -340,6 +340,13 @@ static enum bp_result transmitter_control_v1_7(
const struct command_table_helper *cmd = bp->cmd_helper; const struct command_table_helper *cmd = bp->cmd_helper;
struct dmub_dig_transmitter_control_data_v1_7 dig_v1_7 = {0}; struct dmub_dig_transmitter_control_data_v1_7 dig_v1_7 = {0};
#if defined(CONFIG_DRM_AMD_DC_DCN)
uint8_t hpo_instance = (uint8_t)cntl->hpo_engine_id - ENGINE_ID_HPO_0;
if (dc_is_dp_signal(cntl->signal))
hpo_instance = (uint8_t)cntl->hpo_engine_id - ENGINE_ID_HPO_DP_0;
#endif
dig_v1_7.phyid = cmd->phy_id_to_atom(cntl->transmitter); dig_v1_7.phyid = cmd->phy_id_to_atom(cntl->transmitter);
dig_v1_7.action = (uint8_t)cntl->action; dig_v1_7.action = (uint8_t)cntl->action;
@ -353,6 +360,9 @@ static enum bp_result transmitter_control_v1_7(
dig_v1_7.hpdsel = cmd->hpd_sel_to_atom(cntl->hpd_sel); dig_v1_7.hpdsel = cmd->hpd_sel_to_atom(cntl->hpd_sel);
dig_v1_7.digfe_sel = cmd->dig_encoder_sel_to_atom(cntl->engine_id); dig_v1_7.digfe_sel = cmd->dig_encoder_sel_to_atom(cntl->engine_id);
dig_v1_7.connobj_id = (uint8_t)cntl->connector_obj_id.id; dig_v1_7.connobj_id = (uint8_t)cntl->connector_obj_id.id;
#if defined(CONFIG_DRM_AMD_DC_DCN)
dig_v1_7.HPO_instance = hpo_instance;
#endif
dig_v1_7.symclk_units.symclk_10khz = cntl->pixel_clock/10; dig_v1_7.symclk_units.symclk_10khz = cntl->pixel_clock/10;
if (cntl->action == TRANSMITTER_CONTROL_ENABLE || if (cntl->action == TRANSMITTER_CONTROL_ENABLE ||

View File

@ -459,9 +459,9 @@ static void dcn_bw_calc_rq_dlg_ttu(
struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &pipe->dlg_regs; struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &pipe->dlg_regs;
struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &pipe->ttu_regs; struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &pipe->ttu_regs;
struct _vcs_dpi_display_rq_regs_st *rq_regs = &pipe->rq_regs; struct _vcs_dpi_display_rq_regs_st *rq_regs = &pipe->rq_regs;
struct _vcs_dpi_display_rq_params_st rq_param = {0}; struct _vcs_dpi_display_rq_params_st *rq_param = &pipe->dml_rq_param;
struct _vcs_dpi_display_dlg_sys_params_st dlg_sys_param = {0}; struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param = &pipe->dml_dlg_sys_param;
struct _vcs_dpi_display_e2e_pipe_params_st input = { { { 0 } } }; struct _vcs_dpi_display_e2e_pipe_params_st *input = &pipe->dml_input;
float total_active_bw = 0; float total_active_bw = 0;
float total_prefetch_bw = 0; float total_prefetch_bw = 0;
int total_flip_bytes = 0; int total_flip_bytes = 0;
@ -470,45 +470,48 @@ static void dcn_bw_calc_rq_dlg_ttu(
memset(dlg_regs, 0, sizeof(*dlg_regs)); memset(dlg_regs, 0, sizeof(*dlg_regs));
memset(ttu_regs, 0, sizeof(*ttu_regs)); memset(ttu_regs, 0, sizeof(*ttu_regs));
memset(rq_regs, 0, sizeof(*rq_regs)); memset(rq_regs, 0, sizeof(*rq_regs));
memset(rq_param, 0, sizeof(*rq_param));
memset(dlg_sys_param, 0, sizeof(*dlg_sys_param));
memset(input, 0, sizeof(*input));
for (i = 0; i < number_of_planes; i++) { for (i = 0; i < number_of_planes; i++) {
total_active_bw += v->read_bandwidth[i]; total_active_bw += v->read_bandwidth[i];
total_prefetch_bw += v->prefetch_bandwidth[i]; total_prefetch_bw += v->prefetch_bandwidth[i];
total_flip_bytes += v->total_immediate_flip_bytes[i]; total_flip_bytes += v->total_immediate_flip_bytes[i];
} }
dlg_sys_param.total_flip_bw = v->return_bw - dcn_bw_max2(total_active_bw, total_prefetch_bw); dlg_sys_param->total_flip_bw = v->return_bw - dcn_bw_max2(total_active_bw, total_prefetch_bw);
if (dlg_sys_param.total_flip_bw < 0.0) if (dlg_sys_param->total_flip_bw < 0.0)
dlg_sys_param.total_flip_bw = 0; dlg_sys_param->total_flip_bw = 0;
dlg_sys_param.t_mclk_wm_us = v->dram_clock_change_watermark; dlg_sys_param->t_mclk_wm_us = v->dram_clock_change_watermark;
dlg_sys_param.t_sr_wm_us = v->stutter_enter_plus_exit_watermark; dlg_sys_param->t_sr_wm_us = v->stutter_enter_plus_exit_watermark;
dlg_sys_param.t_urg_wm_us = v->urgent_watermark; dlg_sys_param->t_urg_wm_us = v->urgent_watermark;
dlg_sys_param.t_extra_us = v->urgent_extra_latency; dlg_sys_param->t_extra_us = v->urgent_extra_latency;
dlg_sys_param.deepsleep_dcfclk_mhz = v->dcf_clk_deep_sleep; dlg_sys_param->deepsleep_dcfclk_mhz = v->dcf_clk_deep_sleep;
dlg_sys_param.total_flip_bytes = total_flip_bytes; dlg_sys_param->total_flip_bytes = total_flip_bytes;
pipe_ctx_to_e2e_pipe_params(pipe, &input.pipe); pipe_ctx_to_e2e_pipe_params(pipe, &input->pipe);
input.clks_cfg.dcfclk_mhz = v->dcfclk; input->clks_cfg.dcfclk_mhz = v->dcfclk;
input.clks_cfg.dispclk_mhz = v->dispclk; input->clks_cfg.dispclk_mhz = v->dispclk;
input.clks_cfg.dppclk_mhz = v->dppclk; input->clks_cfg.dppclk_mhz = v->dppclk;
input.clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; input->clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
input.clks_cfg.socclk_mhz = v->socclk; input->clks_cfg.socclk_mhz = v->socclk;
input.clks_cfg.voltage = v->voltage_level; input->clks_cfg.voltage = v->voltage_level;
// dc->dml.logger = pool->base.logger; // dc->dml.logger = pool->base.logger;
input.dout.output_format = (v->output_format[in_idx] == dcn_bw_420) ? dm_420 : dm_444; input->dout.output_format = (v->output_format[in_idx] == dcn_bw_420) ? dm_420 : dm_444;
input.dout.output_type = (v->output[in_idx] == dcn_bw_hdmi) ? dm_hdmi : dm_dp; input->dout.output_type = (v->output[in_idx] == dcn_bw_hdmi) ? dm_hdmi : dm_dp;
//input[in_idx].dout.output_standard; //input[in_idx].dout.output_standard;
/*todo: soc->sr_enter_plus_exit_time??*/ /*todo: soc->sr_enter_plus_exit_time??*/
dlg_sys_param.t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep; dlg_sys_param->t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep;
dml1_rq_dlg_get_rq_params(dml, &rq_param, input.pipe.src); dml1_rq_dlg_get_rq_params(dml, rq_param, &input->pipe.src);
dml1_extract_rq_regs(dml, rq_regs, rq_param); dml1_extract_rq_regs(dml, rq_regs, rq_param);
dml1_rq_dlg_get_dlg_params( dml1_rq_dlg_get_dlg_params(
dml, dml,
dlg_regs, dlg_regs,
ttu_regs, ttu_regs,
rq_param.dlg, &rq_param->dlg,
dlg_sys_param, dlg_sys_param,
input, input,
true, true,
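The refactor above is a stack-usage reduction: three large DML structures move off the stack into the pipe context and are reached through pointers. A generic sketch of the pattern, using stand-in names and types, is:

/* Generic illustration of the stack-reduction pattern, stand-in types. */
struct big_params { int v[512]; };	/* stands in for the large DML structs */

struct example_pipe {
	struct big_params dml_scratch;	/* previously a function-local */
};

static void example_calc(struct example_pipe *pipe)
{
	struct big_params *p = &pipe->dml_scratch;

	memset(p, 0, sizeof(*p));	/* replaces the " = {0}" initializer */
	/* ... use p->v[...] exactly as the old locals were used ... */
}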

View File

@ -38,6 +38,8 @@
#include "clk/clk_11_0_0_offset.h" #include "clk/clk_11_0_0_offset.h"
#include "clk/clk_11_0_0_sh_mask.h" #include "clk/clk_11_0_0_sh_mask.h"
#include "irq/dcn20/irq_service_dcn20.h"
#undef FN #undef FN
#define FN(reg_name, field_name) \ #define FN(reg_name, field_name) \
clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name
@ -221,6 +223,8 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
bool force_reset = false; bool force_reset = false;
bool p_state_change_support; bool p_state_change_support;
int total_plane_count; int total_plane_count;
int irq_src;
uint32_t hpd_state;
if (dc->work_arounds.skip_clock_update) if (dc->work_arounds.skip_clock_update)
return; return;
@ -238,7 +242,13 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
if (dc->res_pool->pp_smu) if (dc->res_pool->pp_smu)
pp_smu = &dc->res_pool->pp_smu->nv_funcs; pp_smu = &dc->res_pool->pp_smu->nv_funcs;
if (display_count == 0) for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= DC_IRQ_SOURCE_HPD6; irq_src++) {
hpd_state = dal_get_hpd_state_dcn20(dc->res_pool->irqs, irq_src);
if (hpd_state)
break;
}
if (display_count == 0 && !hpd_state)
enter_display_off = true; enter_display_off = true;
if (enter_display_off == safe_to_lower) { if (enter_display_off == safe_to_lower) {

View File

@ -42,6 +42,7 @@
#include "clk/clk_10_0_2_sh_mask.h" #include "clk/clk_10_0_2_sh_mask.h"
#include "renoir_ip_offset.h" #include "renoir_ip_offset.h"
#include "irq/dcn21/irq_service_dcn21.h"
/* Constants */ /* Constants */
@ -66,11 +67,9 @@ int rn_get_active_display_cnt_wa(
for (i = 0; i < context->stream_count; i++) { for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_state *stream = context->streams[i]; const struct dc_stream_state *stream = context->streams[i];
/* Extend the WA to DP for Linux*/
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A || if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK || stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK || stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
stream->signal == SIGNAL_TYPE_DISPLAY_PORT)
tmds_present = true; tmds_present = true;
} }
@ -131,9 +130,11 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc; struct dc *dc = clk_mgr_base->ctx->dc;
int display_count; int display_count;
int irq_src;
bool update_dppclk = false; bool update_dppclk = false;
bool update_dispclk = false; bool update_dispclk = false;
bool dpp_clock_lowered = false; bool dpp_clock_lowered = false;
uint32_t hpd_state;
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu; struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
@ -149,8 +150,15 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) { if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
display_count = rn_get_active_display_cnt_wa(dc, context); display_count = rn_get_active_display_cnt_wa(dc, context);
for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= DC_IRQ_SOURCE_HPD5; irq_src++) {
hpd_state = dal_get_hpd_state_dcn21(dc->res_pool->irqs, irq_src);
if (hpd_state)
break;
}
/* if we can go lower, go lower */ /* if we can go lower, go lower */
if (display_count == 0) { if (display_count == 0 && !hpd_state) {
rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER); rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER);
/* update power state */ /* update power state */
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;

View File

@ -582,8 +582,8 @@ static struct wm_table lpddr5_wm_table = {
.wm_inst = WM_A, .wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG, .wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333, .pstate_latency_us = 11.65333,
.sr_exit_time_us = 5.32, .sr_exit_time_us = 7.95,
.sr_enter_plus_exit_time_us = 6.38, .sr_enter_plus_exit_time_us = 9,
.valid = true, .valid = true,
}, },
{ {

View File

@ -87,7 +87,7 @@ int dcn31_get_active_display_cnt_wa(
const struct dc_link *link = dc->links[i]; const struct dc_link *link = dc->links[i];
/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */ /* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
if (link->link_enc->funcs->is_dig_enabled && if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc)) link->link_enc->funcs->is_dig_enabled(link->link_enc))
display_count++; display_count++;
} }
@ -142,6 +142,7 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_ALLOW && if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_ALLOW &&
new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) { new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
dcn31_smu_set_Z9_support(clk_mgr, true); dcn31_smu_set_Z9_support(clk_mgr, true);
dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, true);
clk_mgr_base->clks.zstate_support = new_clocks->zstate_support; clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
} }
@ -166,6 +167,7 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_DISALLOW && if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_DISALLOW &&
new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) { new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
dcn31_smu_set_Z9_support(clk_mgr, false); dcn31_smu_set_Z9_support(clk_mgr, false);
dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, false);
clk_mgr_base->clks.zstate_support = new_clocks->zstate_support; clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
} }
@ -640,7 +642,7 @@ void dcn31_clk_mgr_construct(
sizeof(struct dcn31_watermarks), sizeof(struct dcn31_watermarks),
&clk_mgr->smu_wm_set.mc_address.quad_part); &clk_mgr->smu_wm_set.mc_address.quad_part);
if (clk_mgr->smu_wm_set.wm_set == 0) { if (!clk_mgr->smu_wm_set.wm_set) {
clk_mgr->smu_wm_set.wm_set = &dummy_wms; clk_mgr->smu_wm_set.wm_set = &dummy_wms;
clk_mgr->smu_wm_set.mc_address.quad_part = 0; clk_mgr->smu_wm_set.mc_address.quad_part = 0;
} }

View File

@@ -71,6 +71,8 @@
 #include "dmub/dmub_srv.h"
+#include "dcn30/dcn30_vpg.h"
 #include "i2caux_interface.h"
 #include "dce/dmub_hw_lock_mgr.h"
@@ -255,6 +257,24 @@ static bool create_links(
 			goto failed_alloc;
 		}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) &&
+				dc->caps.dp_hpo &&
+				link->dc->res_pool->res_cap->num_hpo_dp_link_encoder > 0) {
+			/* FPGA case - Allocate HPO DP link encoder */
+			if (i < link->dc->res_pool->res_cap->num_hpo_dp_link_encoder) {
+				link->hpo_dp_link_enc = link->dc->res_pool->hpo_dp_link_enc[i];
+				if (link->hpo_dp_link_enc == NULL) {
+					BREAK_TO_DEBUGGER();
+					goto failed_alloc;
+				}
+				link->hpo_dp_link_enc->hpd_source = link->link_enc->hpd_source;
+				link->hpo_dp_link_enc->transmitter = link->link_enc->transmitter;
+			}
+		}
+#endif
 		link->link_status.dpcd_caps = &link->dpcd_caps;
 		enc_init.ctx = dc->ctx;
@@ -1544,7 +1564,7 @@ static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
 }
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-void dc_z10_restore(struct dc *dc)
+void dc_z10_restore(const struct dc *dc)
 {
 	if (dc->hwss.z10_restore)
 		dc->hwss.z10_restore(dc);
@@ -1783,6 +1803,11 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
 	post_surface_trace(dc);
+	if (dc->ctx->dce_version >= DCE_VERSION_MAX)
+		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
+	else
+		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
 	if (is_flip_pending_in_pipes(dc, context))
 		return;
@@ -1990,7 +2015,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
 	}
 	if (u->plane_info->dcc.enable != u->surface->dcc.enable
-			|| u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks
+			|| u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
 			|| u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
 		/* During DCC on/off, stutter period is calculated before
 		 * DCC has fully transitioned. This results in incorrect
@@ -2532,6 +2557,9 @@ static void commit_planes_do_stream_update(struct dc *dc,
 		enum surface_update_type update_type,
 		struct dc_state *context)
 {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	struct vpg *vpg;
+#endif
 	int j;
 	// Stream updates
@@ -2552,6 +2580,11 @@ static void commit_planes_do_stream_update(struct dc *dc,
 					stream_update->vrr_infopacket ||
 					stream_update->vsc_infopacket ||
 					stream_update->vsp_infopacket) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+				vpg = pipe_ctx->stream_res.stream_enc->vpg;
+				if (vpg && vpg->funcs->vpg_poweron)
+					vpg->funcs->vpg_poweron(vpg);
+#endif
 				resource_build_info_frame(pipe_ctx);
 				dc->hwss.update_info_frame(pipe_ctx);
 			}
@@ -2968,6 +3001,9 @@ void dc_commit_updates_for_stream(struct dc *dc,
 			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
 				new_pipe->plane_state->force_full_update = true;
 		}
+	} else if (update_type == UPDATE_TYPE_FAST) {
+		/* Previous frame finished and HW is ready for optimization. */
+		dc_post_update_surfaces_to_stream(dc);
 	}
@@ -3024,15 +3060,6 @@ void dc_commit_updates_for_stream(struct dc *dc,
 				pipe_ctx->plane_state->force_full_update = false;
 		}
 	}
-	/*let's use current_state to update watermark etc*/
-	if (update_type >= UPDATE_TYPE_FULL) {
-		dc_post_update_surfaces_to_stream(dc);
-		if (dc_ctx->dce_version >= DCE_VERSION_MAX)
-			TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
-		else
-			TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
-	}
 	return;


@@ -51,6 +51,8 @@
 #include "inc/link_enc_cfg.h"
 #include "inc/link_dpcd.h"
+#include "dc/dcn30/dcn30_vpg.h"
 #define DC_LOGGER_INIT(logger)
 #define LINK_INFO(...) \
@@ -64,6 +66,31 @@
 /*******************************************************************************
  * Private functions
  ******************************************************************************/
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static bool add_dp_hpo_link_encoder_to_link(struct dc_link *link)
+{
+	struct hpo_dp_link_encoder *enc = resource_get_unused_hpo_dp_link_encoder(
+			link->dc->res_pool);
+
+	if (!link->hpo_dp_link_enc && enc) {
+		link->hpo_dp_link_enc = enc;
+		link->hpo_dp_link_enc->transmitter = link->link_enc->transmitter;
+		link->hpo_dp_link_enc->hpd_source = link->link_enc->hpd_source;
+	}
+
+	return (link->hpo_dp_link_enc != NULL);
+}
+
+static void remove_dp_hpo_link_encoder_from_link(struct dc_link *link)
+{
+	if (link->hpo_dp_link_enc) {
+		link->hpo_dp_link_enc->hpd_source = HPD_SOURCEID_UNKNOWN;
+		link->hpo_dp_link_enc->transmitter = TRANSMITTER_UNKNOWN;
+		link->hpo_dp_link_enc = NULL;
+	}
+}
+#endif
 static void dc_link_destruct(struct dc_link *link)
 {
 	int i;
@@ -91,6 +118,12 @@ static void dc_link_destruct(struct dc_link *link)
 		link->link_enc->funcs->destroy(&link->link_enc);
 	}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (link->hpo_dp_link_enc) {
+		remove_dp_hpo_link_encoder_from_link(link);
+	}
+#endif
 	if (link->local_sink)
 		dc_sink_release(link->local_sink);
@@ -928,6 +961,11 @@ static bool dc_link_detect_helper(struct dc_link *link,
 			return false;
 		}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		if (dp_get_link_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING)
+			add_dp_hpo_link_encoder_to_link(link);
+#endif
 		if (link->type == dc_connection_mst_branch) {
 			LINK_INFO("link=%d, mst branch is now Connected\n",
 					link->link_index);
@@ -1173,6 +1211,11 @@ static bool dc_link_detect_helper(struct dc_link *link,
 				sizeof(link->mst_stream_alloc_table.stream_allocations));
 		}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING)
+			reset_dp_hpo_stream_encoders_for_link(link);
+#endif
 		link->type = dc_connection_none;
 		sink_caps.signal = SIGNAL_TYPE_NONE;
 		/* When we unplug a passive DP-HDMI dongle connection, dongle_max_pix_clk
@@ -1209,6 +1252,10 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
 		}
 	}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	dc_z10_restore(dc);
+#endif
 	/* get out of low power state */
 	if (!can_apply_seamless_boot && reason != DETECT_REASON_BOOT)
 		clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
@@ -1549,6 +1596,9 @@ static bool dc_link_construct(struct dc_link *link,
 	}
 	DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE);
+#endif
 	/* Update link encoder tracking variables. These are used for the dynamic
 	 * assignment of link encoders to streams.
@@ -1741,17 +1791,36 @@ static enum dc_status enable_link_dp(struct dc_state *state,
 	/* get link settings for video mode timing */
 	decide_link_settings(stream, &link_settings);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (dp_get_link_encoding_format(&link_settings) == DP_128b_132b_ENCODING &&
+			pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT) {
+		dp_enable_mst_on_sink(link, true);
+	}
+#endif
 	if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) {
 		/*in case it is not on*/
 		link->dc->hwss.edp_power_control(link, true);
 		link->dc->hwss.edp_wait_for_hpd_ready(link, true);
 	}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (dp_get_link_encoding_format(&link_settings) == DP_128b_132b_ENCODING) {
+		/* TODO - DP2.0 HW: calculate 32 symbol clock for HPO encoder */
+	} else {
+		pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
+				link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
+		if (state->clk_mgr && !apply_seamless_boot_optimization)
+			state->clk_mgr->funcs->update_clocks(state->clk_mgr,
+					state, false);
+	}
+#else
 	pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
 			link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
 	if (state->clk_mgr && !apply_seamless_boot_optimization)
 		state->clk_mgr->funcs->update_clocks(state->clk_mgr,
 				state, false);
+#endif
 	// during mode switch we do DP_SET_POWER off then on, and OUI is lost
 	dpcd_set_source_specific_data(link);
@@ -1780,7 +1849,12 @@ static enum dc_status enable_link_dp(struct dc_state *state,
 	else
 		fec_enable = true;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING)
+		dp_set_fec_enable(link, fec_enable);
+#else
 	dp_set_fec_enable(link, fec_enable);
+#endif
 	// during mode set we do DP_SET_POWER off then on, aux writes are lost
 	if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
@@ -2284,6 +2358,9 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
 	if (dc_is_dp_signal(signal)) {
 		/* SST DP, eDP */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		struct dc_link_settings link_settings = link->cur_link_settings;
+#endif
 		if (dc_is_dp_sst_signal(signal))
 			dp_disable_link_phy(link, signal);
 		else
@@ -2291,8 +2368,15 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
 		if (dc_is_dp_sst_signal(signal) ||
 				link->mst_stream_alloc_table.stream_count == 0) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+			if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) {
+				dp_set_fec_enable(link, false);
+				dp_set_fec_ready(link, false);
+			}
+#else
 			dp_set_fec_enable(link, false);
 			dp_set_fec_ready(link, false);
+#endif
 		}
 	} else {
 		if (signal != SIGNAL_TYPE_VIRTUAL)
@@ -2475,9 +2559,14 @@ static bool dp_active_dongle_validate_timing(
 		break;
 	}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (dpcd_caps->dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER &&
+			dongle_caps->extendedCapValid == true) {
+#else
 	if (dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
 			dongle_caps->extendedCapValid == false)
 		return true;
+#endif
 	/* Check Pixel Encoding */
 	switch (timing->pixel_encoding) {
@@ -2520,6 +2609,89 @@ static bool dp_active_dongle_validate_timing(
 	if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
 		return false;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	}
+
+	if (dpcd_caps->channel_coding_cap.bits.DP_128b_132b_SUPPORTED == 0 &&
+			dpcd_caps->dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT == 0 &&
+			dongle_caps->dfp_cap_ext.supported) {
+		if (dongle_caps->dfp_cap_ext.max_pixel_rate_in_mps < (timing->pix_clk_100hz / 10000))
+			return false;
+
+		if (dongle_caps->dfp_cap_ext.max_video_h_active_width < timing->h_addressable)
+			return false;
+
+		if (dongle_caps->dfp_cap_ext.max_video_v_active_height < timing->v_addressable)
+			return false;
+
+		if (timing->pixel_encoding == PIXEL_ENCODING_RGB) {
+			if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
+				return false;
+			if (timing->display_color_depth == COLOR_DEPTH_666 &&
+					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_6bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_888 &&
+					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_8bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_10bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_12bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_16bpc)
+				return false;
+		} else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR444) {
+			if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
+				return false;
+			if (timing->display_color_depth == COLOR_DEPTH_888 &&
+					!dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_8bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+					!dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_10bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+					!dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_12bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+					!dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_16bpc)
+				return false;
+		} else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+			if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
+				return false;
+			if (timing->display_color_depth == COLOR_DEPTH_888 &&
+					!dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_8bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+					!dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_10bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+					!dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_12bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+					!dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_16bpc)
+				return false;
+		} else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+			if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
+				return false;
+			if (timing->display_color_depth == COLOR_DEPTH_888 &&
+					!dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_8bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+					!dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_10bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+					!dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_12bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+					!dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_16bpc)
+				return false;
+		}
+	}
+#endif
 	return true;
 }
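The four pixel-encoding branches above repeat the same color-depth ladder against a different dfp_cap_ext caps field. A table-style helper can express that shared shape; the sketch below uses simplified stand-in types (depth_caps, enum depth), not the real dpcd_caps structures:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for one of the per-encoding color-depth cap sets. */
struct depth_caps {
	bool support_6bpc;
	bool support_8bpc;
	bool support_10bpc;
	bool support_12bpc;
	bool support_16bpc;
};

enum depth { DEPTH_666, DEPTH_888, DEPTH_101010, DEPTH_121212, DEPTH_161616 };

/* One lookup replaces the repeated if/else ladder per encoding. */
static bool depth_supported(const struct depth_caps *caps, enum depth d)
{
	switch (d) {
	case DEPTH_666:    return caps->support_6bpc;
	case DEPTH_888:    return caps->support_8bpc;
	case DEPTH_101010: return caps->support_10bpc;
	case DEPTH_121212: return caps->support_12bpc;
	case DEPTH_161616: return caps->support_16bpc;
	}
	return false;
}

int main(void)
{
	struct depth_caps ycbcr444 = { .support_8bpc = true, .support_10bpc = true };

	/* 10 bpc passes, 12 bpc is rejected. */
	printf("%d %d\n", depth_supported(&ycbcr444, DEPTH_101010),
			depth_supported(&ycbcr444, DEPTH_121212));
	return 0;
}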
@@ -3024,6 +3196,107 @@ static void update_mst_stream_alloc_table(
 		link->mst_stream_alloc_table.stream_allocations[i] =
 				work_table[i];
 }
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+/*
+ * Payload allocation/deallocation for SST introduced in DP2.0
+ */
+enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx, bool allocate)
+{
+	struct dc_stream_state *stream = pipe_ctx->stream;
+	struct dc_link *link = stream->link;
+	struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc;
+	struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc;
+	struct link_mst_stream_allocation_table proposed_table = {0};
+	struct fixed31_32 avg_time_slots_per_mtp;
+	DC_LOGGER_INIT(link->ctx->logger);
+
+	/* slot X.Y for SST payload deallocate */
+	if (!allocate) {
+		avg_time_slots_per_mtp = dc_fixpt_from_int(0);
+
+		DC_LOG_DP2("SST Update Payload: set_throttled_vcp_size slot X.Y for SST stream"
+				"X: %d "
+				"Y: %d",
+				dc_fixpt_floor(
+					avg_time_slots_per_mtp),
+				dc_fixpt_ceil(
+					dc_fixpt_shl(
+						dc_fixpt_sub_int(
+							avg_time_slots_per_mtp,
+							dc_fixpt_floor(
+								avg_time_slots_per_mtp)),
+						26)));
+
+		hpo_dp_link_encoder->funcs->set_throttled_vcp_size(
+				hpo_dp_link_encoder,
+				hpo_dp_stream_encoder->inst,
+				avg_time_slots_per_mtp);
+	}
+
+	/* calculate VC payload and update branch with new payload allocation table*/
+	if (!dpcd_write_128b_132b_sst_payload_allocation_table(
+			stream,
+			link,
+			&proposed_table,
+			allocate)) {
+		DC_LOG_ERROR("SST Update Payload: Failed to update "
+				"allocation table for "
+				"pipe idx: %d\n",
+				pipe_ctx->pipe_idx);
+	}
+
+	proposed_table.stream_allocations[0].hpo_dp_stream_enc = hpo_dp_stream_encoder;
+
+	ASSERT(proposed_table.stream_count == 1);
+
+	//TODO - DP2.0 Logging: Instead of hpo_dp_stream_enc pointer, log instance id
+	DC_LOG_DP2("SST Update Payload: hpo_dp_stream_enc: %p "
+			"vcp_id: %d "
+			"slot_count: %d\n",
+			(void *) proposed_table.stream_allocations[0].hpo_dp_stream_enc,
+			proposed_table.stream_allocations[0].vcp_id,
+			proposed_table.stream_allocations[0].slot_count);
+
+	/* program DP source TX for payload */
+	hpo_dp_link_encoder->funcs->update_stream_allocation_table(
+			hpo_dp_link_encoder,
+			&proposed_table);
+
+	/* poll for ACT handled */
+	if (!dpcd_poll_for_allocation_change_trigger(link)) {
+		// Failures will result in blackscreen and errors logged
+		BREAK_TO_DEBUGGER();
+	}
+
+	/* slot X.Y for SST payload allocate */
+	if (allocate) {
+		avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, link);
+
+		DC_LOG_DP2("SST Update Payload: "
+				"slot.X: %d "
+				"slot.Y: %d",
+				dc_fixpt_floor(
+					avg_time_slots_per_mtp),
+				dc_fixpt_ceil(
+					dc_fixpt_shl(
+						dc_fixpt_sub_int(
+							avg_time_slots_per_mtp,
+							dc_fixpt_floor(
+								avg_time_slots_per_mtp)),
+						26)));
+
+		hpo_dp_link_encoder->funcs->set_throttled_vcp_size(
+				hpo_dp_link_encoder,
+				hpo_dp_stream_encoder->inst,
+				avg_time_slots_per_mtp);
+	}
+
+	/* Always return DC_OK.
+	 * If part of sequence fails, log failure(s) and show blackscreen
+	 */
+	return DC_OK;
+}
+#endif
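The slot X.Y values logged above come from fixed-point math: X is the integer part of avg_time_slots_per_mtp and Y is the fractional part scaled by 2^26 and rounded up. A standalone sketch of that arithmetic, modeling the value as a raw 64-bit integer with 32 fractional bits in the spirit of fixed31_32 (this mirrors the dc_fixpt_* calls; it is not that API):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t fx31_32; /* real value * 2^32 */

static uint32_t slot_x(fx31_32 v)
{
	return (uint32_t)(v >> 32); /* floor */
}

static uint32_t slot_y(fx31_32 v)
{
	uint64_t frac = v & 0xffffffffULL;  /* fractional bits */
	uint64_t y = (frac << 26) >> 32;    /* integer part of frac * 2^26 */

	if ((frac << 26) & 0xffffffffULL)   /* ceil: round any residue up */
		y++;
	return (uint32_t)y;
}

int main(void)
{
	/* avg_time_slots_per_mtp = 3.5 -> X = 3, Y = 0.5 * 2^26 = 33554432 */
	fx31_32 v = (3ULL << 32) | (1ULL << 31);

	printf("slot.X: %u slot.Y: %u\n", slot_x(v), slot_y(v));
	return 0;
}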
 /* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table
  * because stream_encoder is not exposed to dm
@@ -3198,6 +3471,10 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
 static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
 {
 	struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	struct link_encoder *link_enc = NULL;
+#endif
 	if (cp_psp && cp_psp->funcs.update_stream_config) {
 		struct cp_psp_stream_config config = {0};
 		enum dp_panel_mode panel_mode =
@@ -3209,8 +3486,23 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
 		config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 		config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
-		config.link_enc_idx = pipe_ctx->stream->link->link_enc->transmitter - TRANSMITTER_UNIPHY_A;
-		config.phy_idx = pipe_ctx->stream->link->link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+		if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY) {
+			link_enc = pipe_ctx->stream->link->link_enc;
+			config.phy_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+		} else if (pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) {
+			link_enc = link_enc_cfg_get_link_enc_used_by_stream(
+					pipe_ctx->stream->ctx->dc,
+					pipe_ctx->stream);
+			config.phy_idx = 0; /* Clear phy_idx for non-physical display endpoints. */
+		}
+		ASSERT(link_enc);
+		if (link_enc)
+			config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+		if (is_dp_128b_132b_signal(pipe_ctx)) {
+			config.stream_enc_idx = pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0;
+			config.link_enc_idx = pipe_ctx->stream->link->hpo_dp_link_enc->inst;
+			config.dp2_enabled = 1;
+		}
 #endif
 		config.dpms_off = dpms_off;
 		config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
@@ -3222,15 +3514,100 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
 	}
 }
 #endif
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pipe_ctx *pipe_ctx)
+{
+	struct dc *dc = pipe_ctx->stream->ctx->dc;
+	struct dc_stream_state *stream = pipe_ctx->stream;
+	struct link_mst_stream_allocation_table proposed_table = {0};
+	struct fixed31_32 avg_time_slots_per_mtp;
+	uint8_t req_slot_count = 0;
+	uint8_t vc_id = 1; /// VC ID always 1 for SST
+	struct dc_link_settings link_settings = {0};
+	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+
+	decide_link_settings(stream, &link_settings);
+	stream->link->cur_link_settings = link_settings;
+
+	/* Enable clock, Configure lane count, and Enable Link Encoder*/
+	enable_dp_hpo_output(stream->link, &stream->link->cur_link_settings);
+
+#ifdef DIAGS_BUILD
+	/* Workaround for FPGA HPO capture DP link data:
+	 * HPO capture will set link to active mode
+	 * This workaround is required to get a capture from start of frame
+	 */
+	if (!dc->debug.fpga_hpo_capture_en) {
+		struct encoder_set_dp_phy_pattern_param params = {0};
+
+		params.dp_phy_pattern = DP_TEST_PATTERN_VIDEO_MODE;
+
+		/* Set link active */
+		stream->link->hpo_dp_link_enc->funcs->set_link_test_pattern(
+				stream->link->hpo_dp_link_enc,
+				&params);
+	}
+#endif
+
+	/* Enable DP_STREAM_ENC */
+	dc->hwss.enable_stream(pipe_ctx);
+
+	/* Set DPS PPS SDP (AKA "info frames") */
+	if (pipe_ctx->stream->timing.flags.DSC) {
+		dp_set_dsc_pps_sdp(pipe_ctx, true, true);
+	}
+
+	/* Allocate Payload */
+	if ((stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) && (state->stream_count > 1)) {
+		// MST case
+		uint8_t i;
+
+		proposed_table.stream_count = state->stream_count;
+		for (i = 0; i < state->stream_count; i++) {
+			avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(state->streams[i], state->streams[i]->link);
+			req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp);
+			proposed_table.stream_allocations[i].slot_count = req_slot_count;
+			proposed_table.stream_allocations[i].vcp_id = i+1;
+			/* NOTE: This makes assumption that pipe_ctx index is same as stream index */
+			proposed_table.stream_allocations[i].hpo_dp_stream_enc = state->res_ctx.pipe_ctx[i].stream_res.hpo_dp_stream_enc;
+		}
+	} else {
+		// SST case
+		avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, stream->link);
+		req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp);
+		proposed_table.stream_count = 1; /// Always 1 stream for SST
+		proposed_table.stream_allocations[0].slot_count = req_slot_count;
+		proposed_table.stream_allocations[0].vcp_id = vc_id;
+		proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
+	}
+
+	stream->link->hpo_dp_link_enc->funcs->update_stream_allocation_table(
+			stream->link->hpo_dp_link_enc,
+			&proposed_table);
+
+	stream->link->hpo_dp_link_enc->funcs->set_throttled_vcp_size(
+			stream->link->hpo_dp_link_enc,
+			pipe_ctx->stream_res.hpo_dp_stream_enc->inst,
+			avg_time_slots_per_mtp);
+
+	dc->hwss.unblank_stream(pipe_ctx, &stream->link->cur_link_settings);
+}
+#endif
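For the payload table built above, each stream reserves ceil(avg_time_slots_per_mtp) time slots; the MST loop hands out VC IDs 1..n while SST always uses a single allocation with VC ID 1. A toy calculation, with doubles standing in for the fixed31_32 values (illustrative numbers only, link with -lm):

#include <math.h>
#include <stdio.h>

int main(void)
{
	double avg_slots[2] = { 12.3, 20.0 }; /* per-stream avg slots per MTP */
	int i;

	for (i = 0; i < 2; i++)
		printf("stream %d: vcp_id=%d slot_count=%d\n",
				i, i + 1, (int)ceil(avg_slots[i]));
	/* stream 0: vcp_id=1 slot_count=13
	 * stream 1: vcp_id=2 slot_count=20 */
	return 0;
}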
 void core_link_enable_stream(
 		struct dc_state *state,
 		struct pipe_ctx *pipe_ctx)
 {
 	struct dc *dc = pipe_ctx->stream->ctx->dc;
 	struct dc_stream_state *stream = pipe_ctx->stream;
+	struct dc_link *link = stream->sink->link;
 	enum dc_status status;
+	struct link_encoder *link_enc;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 	enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
+	struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
 #endif
 	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
@@ -3238,23 +3615,57 @@ void core_link_enable_stream(
 			dc_is_virtual_signal(pipe_ctx->stream->signal))
 		return;
+	if (dc->res_pool->funcs->link_encs_assign && stream->link->ep_type != DISPLAY_ENDPOINT_PHY)
+		link_enc = link_enc_cfg_get_link_enc_used_by_stream(dc, stream);
+	else
+		link_enc = stream->link->link_enc;
+	ASSERT(link_enc);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (!dc_is_virtual_signal(pipe_ctx->stream->signal)
+			&& !is_dp_128b_132b_signal(pipe_ctx)) {
+#else
 	if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) {
+#endif
-		stream->link->link_enc->funcs->setup(
-			stream->link->link_enc,
-			pipe_ctx->stream->signal);
+		if (link_enc)
+			link_enc->funcs->setup(
+				link_enc,
+				pipe_ctx->stream->signal);
 		pipe_ctx->stream_res.stream_enc->funcs->setup_stereo_sync(
 			pipe_ctx->stream_res.stream_enc,
 			pipe_ctx->stream_res.tg->inst,
 			stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE);
 	}
-	if (dc_is_dp_signal(pipe_ctx->stream->signal))
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (is_dp_128b_132b_signal(pipe_ctx)) {
+		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->set_stream_attribute(
+				pipe_ctx->stream_res.hpo_dp_stream_enc,
+				&stream->timing,
+				stream->output_color_space,
+				stream->use_vsc_sdp_for_colorimetry,
+				stream->timing.flags.DSC,
+				false);
+		otg_out_dest = OUT_MUX_HPO_DP;
+	} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
 		pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(
+				pipe_ctx->stream_res.stream_enc,
+				&stream->timing,
+				stream->output_color_space,
+				stream->use_vsc_sdp_for_colorimetry,
+				stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
+	}
+#else
+	pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(
 			pipe_ctx->stream_res.stream_enc,
 			&stream->timing,
 			stream->output_color_space,
 			stream->use_vsc_sdp_for_colorimetry,
 			stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
+#endif
+
+	if (dc_is_dp_signal(pipe_ctx->stream->signal))
+		dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
 	if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
 		pipe_ctx->stream_res.stream_enc->funcs->hdmi_set_stream_attribute(
@@ -3288,9 +3699,18 @@ void core_link_enable_stream(
 		pipe_ctx->stream->apply_edp_fast_boot_optimization = false;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	// Enable VPG before building infoframe
+	if (vpg && vpg->funcs->vpg_poweron)
+		vpg->funcs->vpg_poweron(vpg);
+#endif
 	resource_build_info_frame(pipe_ctx);
 	dc->hwss.update_info_frame(pipe_ctx);
+
+	if (dc_is_dp_signal(pipe_ctx->stream->signal))
+		dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
 	/* Do not touch link on seamless boot optimization. */
 	if (pipe_ctx->stream->apply_seamless_boot_optimization) {
 		pipe_ctx->stream->dpms_off = false;
@@ -3365,10 +3785,16 @@ void core_link_enable_stream(
 		 * as a workaround for the incorrect value being applied
 		 * from transmitter control.
 		 */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) ||
+				is_dp_128b_132b_signal(pipe_ctx)))
+#else
 		if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
+#endif
-			stream->link->link_enc->funcs->setup(
-				stream->link->link_enc,
-				pipe_ctx->stream->signal);
+			if (link_enc)
+				link_enc->funcs->setup(
+					link_enc,
+					pipe_ctx->stream->signal);
 		dc->hwss.enable_stream(pipe_ctx);
@@ -3377,12 +3803,17 @@ void core_link_enable_stream(
 		if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
 				dc_is_virtual_signal(pipe_ctx->stream->signal)) {
 			dp_set_dsc_on_rx(pipe_ctx, true);
-			dp_set_dsc_pps_sdp(pipe_ctx, true);
+			dp_set_dsc_pps_sdp(pipe_ctx, true, true);
 		}
 	}
 	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
 		dc_link_allocate_mst_payload(pipe_ctx);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+			is_dp_128b_132b_signal(pipe_ctx))
+		dc_link_update_sst_payload(pipe_ctx, true);
+#endif
 	dc->hwss.unblank_stream(pipe_ctx,
 		&pipe_ctx->stream->link->cur_link_settings);
@@ -3399,6 +3830,11 @@ void core_link_enable_stream(
 		dc->hwss.enable_audio_stream(pipe_ctx);
 	} else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		if (is_dp_128b_132b_signal(pipe_ctx)) {
+			fpga_dp_hpo_enable_link_and_stream(state, pipe_ctx);
+		}
+#endif
 		if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
 				dc_is_virtual_signal(pipe_ctx->stream->signal))
 			dp_set_dsc_enable(pipe_ctx, true);
@@ -3415,6 +3851,9 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
 	struct dc *dc = pipe_ctx->stream->ctx->dc;
 	struct dc_stream_state *stream = pipe_ctx->stream;
 	struct dc_link *link = stream->sink->link;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
+#endif
 	if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
 			dc_is_virtual_signal(pipe_ctx->stream->signal))
@@ -3434,6 +3873,11 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
 	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
 		deallocate_mst_payload(pipe_ctx);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+			is_dp_128b_132b_signal(pipe_ctx))
+		dc_link_update_sst_payload(pipe_ctx, false);
+#endif
 	if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
 		struct ext_hdmi_settings settings = {0};
@@ -3460,14 +3904,44 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
 		}
 	}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+			!is_dp_128b_132b_signal(pipe_ctx)) {
+		/* In DP1.x SST mode, our encoder will go to TPS1
+		 * when link is on but stream is off.
+		 * Disabling link before stream will avoid exposing TPS1 pattern
+		 * during the disable sequence as it will confuse some receivers
+		 * state machine.
+		 * In DP2 or MST mode, our encoder will stay video active
+		 */
+		disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
+		dc->hwss.disable_stream(pipe_ctx);
+	} else {
+		dc->hwss.disable_stream(pipe_ctx);
+		disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
+	}
+#else
 	disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
 	dc->hwss.disable_stream(pipe_ctx);
+#endif
 	if (pipe_ctx->stream->timing.flags.DSC) {
 		if (dc_is_dp_signal(pipe_ctx->stream->signal))
 			dp_set_dsc_enable(pipe_ctx, false);
 	}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (is_dp_128b_132b_signal(pipe_ctx)) {
+		if (pipe_ctx->stream_res.tg->funcs->set_out_mux)
+			pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO);
+	}
+#endif
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (vpg && vpg->funcs->vpg_powerdown)
+		vpg->funcs->vpg_powerdown(vpg);
+#endif
 }
 void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
@@ -3600,6 +4074,13 @@ void dc_link_set_preferred_training_settings(struct dc *dc,
 	if (link_setting != NULL) {
 		link->preferred_link_setting = *link_setting;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		if (dp_get_link_encoding_format(link_setting) ==
+				DP_128b_132b_ENCODING && !link->hpo_dp_link_enc) {
+			if (!add_dp_hpo_link_encoder_to_link(link))
+				memset(&link->preferred_link_setting, 0, sizeof(link->preferred_link_setting));
+		}
+#endif
 	} else {
 		link->preferred_link_setting.lane_count = LANE_COUNT_UNKNOWN;
 		link->preferred_link_setting.link_rate = LINK_RATE_UNKNOWN;
@@ -3641,6 +4122,38 @@ uint32_t dc_link_bandwidth_kbps(
 	const struct dc_link *link,
 	const struct dc_link_settings *link_setting)
 {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	uint32_t total_data_bw_efficiency_x10000 = 0;
+	uint32_t link_rate_per_lane_kbps = 0;
+
+	switch (dp_get_link_encoding_format(link_setting)) {
+	case DP_8b_10b_ENCODING:
+		/* For 8b/10b encoding:
+		 * link rate is defined in the unit of LINK_RATE_REF_FREQ_IN_KHZ per DP byte per lane.
+		 * data bandwidth efficiency is 80% with additional 3% overhead if FEC is supported.
+		 */
+		link_rate_per_lane_kbps = link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ * BITS_PER_DP_BYTE;
+		total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_8b_10b_x10000;
+		if (dc_link_should_enable_fec(link)) {
+			total_data_bw_efficiency_x10000 /= 100;
+			total_data_bw_efficiency_x10000 *= DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100;
+		}
+		break;
+	case DP_128b_132b_ENCODING:
+		/* For 128b/132b encoding:
+		 * link rate is defined in the unit of 10mbps per lane.
+		 * total data bandwidth efficiency is always 96.71%.
+		 */
+		link_rate_per_lane_kbps = link_setting->link_rate * 10000;
+		total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_128b_132b_x10000;
+		break;
+	default:
+		break;
+	}
+
+	/* overall effective link bandwidth = link rate per lane * lane count * total data bandwidth efficiency */
+	return link_rate_per_lane_kbps * link_setting->lane_count / 10000 * total_data_bw_efficiency_x10000;
+#else
 	uint32_t link_bw_kbps =
 		link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* bytes per sec */
@@ -3671,9 +4184,9 @@ uint32_t dc_link_bandwidth_kbps(
 		long long fec_link_bw_kbps = link_bw_kbps * 970LL;
 		link_bw_kbps = (uint32_t)(div64_s64(fec_link_bw_kbps, 1000LL));
 	}
 	return link_bw_kbps;
+#endif
 }
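Plugging numbers into the new branch above makes the units concrete. The sketch below reruns the integer math with the efficiency values implied by the comments (80% for 8b/10b, roughly 97% of that with FEC, 96.71% for 128b/132b) and the usual DC convention of 8b/10b link_rate in 0.27 Gbps units; treat the exact constants as assumptions, not the kernel macro definitions:

#include <stdint.h>
#include <stdio.h>

#define REF_FREQ_KHZ 27000   /* assumed LINK_RATE_REF_FREQ_IN_KHZ */
#define BITS_PER_DP_BYTE 10  /* 8b/10b: 10 bits on the wire per byte */

static uint32_t bw_kbps_8b_10b(uint32_t link_rate, uint32_t lanes, int fec)
{
	uint32_t per_lane_kbps = link_rate * REF_FREQ_KHZ * BITS_PER_DP_BYTE;
	uint32_t eff_x10000 = 8000;             /* 80% payload efficiency */

	if (fec)
		eff_x10000 = eff_x10000 / 100 * 97; /* roughly 3% FEC overhead */
	return per_lane_kbps * lanes / 10000 * eff_x10000;
}

int main(void)
{
	/* HBR3 (link_rate 30 -> 8.1 Gbps/lane), 4 lanes:
	 * no FEC: 8,100,000 * 4 / 10000 * 8000 = 25,920,000 kbps */
	printf("HBR3 x4, no FEC: %u kbps\n", bw_kbps_8b_10b(30, 4, 0));
	printf("HBR3 x4, FEC:    %u kbps\n", bw_kbps_8b_10b(30, 4, 1));
	/* UHBR10 (128b/132b, rate in 10 Mbps units -> 10,000,000 kbps/lane),
	 * 4 lanes at 96.71%: 38,684,000 kbps */
	printf("UHBR10 x4:       %u kbps\n",
			(uint32_t)(1000u * 10000u / 10000u * 4u * 9671u));
	return 0;
}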
 const struct dc_link_settings *dc_link_get_link_cap(
@@ -3700,14 +4213,14 @@ bool dc_link_is_fec_supported(const struct dc_link *link)
 	 */
 	if (link->is_dig_mapping_flexible &&
 			link->dc->res_pool->funcs->link_encs_assign) {
-		link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link);
+		link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
 		if (link_enc == NULL)
-			link_enc = link_enc_cfg_get_next_avail_link_enc(link->dc, link->dc->current_state);
+			link_enc = link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
 	} else
 		link_enc = link->link_enc;
 	ASSERT(link_enc);
-	return (dc_is_dp_signal(link->connector_signal) &&
+	return (dc_is_dp_signal(link->connector_signal) && link_enc &&
 			link_enc->features.fec_supported &&
 			link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
 			!IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment));
@@ -3721,8 +4234,8 @@ bool dc_link_should_enable_fec(const struct dc_link *link)
 	if ((link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST &&
 			link->local_sink &&
 			link->local_sink->edid_caps.panel_patch.disable_fec) ||
-			(link->connector_signal == SIGNAL_TYPE_EDP &&
-			link->dc->debug.force_enable_edp_fec == false)) // Disable FEC for eDP
+			(link->connector_signal == SIGNAL_TYPE_EDP
+			))
 		is_fec_disable = true;
 	if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec && !is_fec_disable)

File diff suppressed because it is too large


@@ -176,12 +176,15 @@ static void dpcd_reduce_address_range(
 		uint8_t * const reduced_data,
 		const uint32_t reduced_size)
 {
-	const uint32_t reduced_end_address = END_ADDRESS(reduced_address, reduced_size);
-	const uint32_t extended_end_address = END_ADDRESS(extended_address, extended_size);
 	const uint32_t offset = reduced_address - extended_address;
-	if (extended_end_address == reduced_end_address && extended_address == reduced_address)
-		return; /* extended and reduced address ranges point to the same data */
+	/*
+	 * If the address is same, address was not extended.
+	 * So we do not need to free any memory.
+	 * The data is in original buffer(reduced_data).
+	 */
+	if (extended_data == reduced_data)
+		return;
 	memcpy(&extended_data[offset], reduced_data, reduced_size);
 	kfree(extended_data);


@@ -35,78 +35,128 @@ static bool is_dig_link_enc_stream(struct dc_stream_state *stream)
 	int i;
 	/* Loop over created link encoder objects. */
-	for (i = 0; i < stream->ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
-		link_enc = stream->ctx->dc->res_pool->link_encoders[i];
-		if (link_enc &&
-				((uint32_t)stream->signal & link_enc->output_signals)) {
-			if (dc_is_dp_signal(stream->signal)) {
-				/* DIGs do not support DP2.0 streams with 128b/132b encoding. */
-				struct dc_link_settings link_settings = {0};
-				decide_link_settings(stream, &link_settings);
-				if ((link_settings.link_rate >= LINK_RATE_LOW) &&
-						link_settings.link_rate <= LINK_RATE_HIGH3) {
-					is_dig_stream = true;
-					break;
-				}
-			} else {
-				is_dig_stream = true;
-				break;
-			}
-		}
-	}
+	if (stream) {
+		for (i = 0; i < stream->ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
+			link_enc = stream->ctx->dc->res_pool->link_encoders[i];
+			/* Need to check link signal type rather than stream signal type which may not
+			 * yet match.
+			 */
+			if (link_enc && ((uint32_t)stream->link->connector_signal & link_enc->output_signals)) {
+				if (dc_is_dp_signal(stream->signal)) {
+					/* DIGs do not support DP2.0 streams with 128b/132b encoding. */
+					struct dc_link_settings link_settings = {0};
+					decide_link_settings(stream, &link_settings);
+					if ((link_settings.link_rate >= LINK_RATE_LOW) &&
+							link_settings.link_rate <= LINK_RATE_HIGH3) {
+						is_dig_stream = true;
+						break;
+					}
+				} else {
+					is_dig_stream = true;
+					break;
+				}
+			}
+		}
+	}
 	return is_dig_stream;
 }
-/* Update DIG link encoder resource tracking variables in dc_state. */
-static void update_link_enc_assignment(
-		struct dc_state *state,
-		struct dc_stream_state *stream,
-		enum engine_id eng_id,
-		bool add_enc)
-{
-	int eng_idx;
-	int stream_idx;
-	int i;
-	if (eng_id != ENGINE_ID_UNKNOWN) {
-		eng_idx = eng_id - ENGINE_ID_DIGA;
-		stream_idx = -1;
-		/* Index of stream in dc_state used to update correct entry in
-		 * link_enc_assignments table.
-		 */
-		for (i = 0; i < state->stream_count; i++) {
-			if (stream == state->streams[i]) {
-				stream_idx = i;
-				break;
-			}
-		}
-		/* Update link encoder assignments table, link encoder availability
-		 * pool and link encoder assigned to stream in state.
-		 * Add/remove encoder resource to/from stream.
-		 */
-		if (stream_idx != -1) {
-			if (add_enc) {
-				state->res_ctx.link_enc_assignments[stream_idx] = (struct link_enc_assignment){
-					.valid = true,
-					.ep_id = (struct display_endpoint_id) {
-						.link_id = stream->link->link_id,
-						.ep_type = stream->link->ep_type},
-					.eng_id = eng_id};
-				state->res_ctx.link_enc_avail[eng_idx] = ENGINE_ID_UNKNOWN;
-				stream->link_enc = stream->ctx->dc->res_pool->link_encoders[eng_idx];
-			} else {
-				state->res_ctx.link_enc_assignments[stream_idx].valid = false;
-				state->res_ctx.link_enc_avail[eng_idx] = eng_id;
-				stream->link_enc = NULL;
-			}
-		} else {
-			dm_output_to_console("%s: Stream not found in dc_state.\n", __func__);
-		}
-	}
-}
+static struct link_enc_assignment get_assignment(struct dc *dc, int i)
+{
+	struct link_enc_assignment assignment;
+
+	if (dc->current_state->res_ctx.link_enc_cfg_ctx.mode == LINK_ENC_CFG_TRANSIENT)
+		assignment = dc->current_state->res_ctx.link_enc_cfg_ctx.transient_assignments[i];
+	else /* LINK_ENC_CFG_STEADY */
+		assignment = dc->current_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
+
+	return assignment;
+}
+
+/* Return stream using DIG link encoder resource. NULL if unused. */
+static struct dc_stream_state *get_stream_using_link_enc(
+		struct dc_state *state,
+		enum engine_id eng_id)
+{
+	struct dc_stream_state *stream = NULL;
+	int i;
+
+	for (i = 0; i < state->stream_count; i++) {
+		struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
+
+		if ((assignment.valid == true) && (assignment.eng_id == eng_id)) {
+			stream = state->streams[i];
+			break;
+		}
+	}
+
+	return stream;
+}
+
+static void remove_link_enc_assignment(
+		struct dc_state *state,
+		struct dc_stream_state *stream,
+		enum engine_id eng_id)
+{
+	int eng_idx;
+	int i;
+
+	if (eng_id != ENGINE_ID_UNKNOWN) {
+		eng_idx = eng_id - ENGINE_ID_DIGA;
+
+		/* stream ptr of stream in dc_state used to update correct entry in
+		 * link_enc_assignments table.
+		 */
+		for (i = 0; i < MAX_PIPES; i++) {
+			struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
+
+			if (assignment.valid && assignment.stream == stream) {
+				state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid = false;
+				/* Only add link encoder back to availability pool if not being
+				 * used by any other stream (i.e. removing SST stream or last MST stream).
+				 */
+				if (get_stream_using_link_enc(state, eng_id) == NULL)
+					state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] = eng_id;
+				stream->link_enc = NULL;
+				break;
+			}
+		}
+	}
+}
+
+static void add_link_enc_assignment(
+		struct dc_state *state,
+		struct dc_stream_state *stream,
+		enum engine_id eng_id)
+{
+	int eng_idx;
+	int i;
+
+	if (eng_id != ENGINE_ID_UNKNOWN) {
+		eng_idx = eng_id - ENGINE_ID_DIGA;
+
+		/* stream ptr of stream in dc_state used to update correct entry in
+		 * link_enc_assignments table.
+		 */
+		for (i = 0; i < state->stream_count; i++) {
+			if (stream == state->streams[i]) {
+				state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i] = (struct link_enc_assignment){
+					.valid = true,
+					.ep_id = (struct display_endpoint_id) {
+						.link_id = stream->link->link_id,
+						.ep_type = stream->link->ep_type},
+					.eng_id = eng_id,
+					.stream = stream};
+				state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] = ENGINE_ID_UNKNOWN;
+				stream->link_enc = stream->ctx->dc->res_pool->link_encoders[eng_idx];
+				break;
+			}
+		}
+
+		/* Attempted to add an encoder assignment for a stream not in dc_state. */
+		ASSERT(i != state->stream_count);
+	}
+}
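The get_assignment() helper above is the single read path for both tables: while a new state is mid-commit the context is switched to LINK_ENC_CFG_TRANSIENT, so queries hit the snapshot copied at assignment time rather than the not-yet-updated steady table. A minimal model of that two-table lookup (hypothetical simplified types, not the dc API):

#include <stdio.h>

enum cfg_mode { CFG_STEADY, CFG_TRANSIENT };

struct assignment { int valid; int eng_id; };

struct cfg {
	enum cfg_mode mode;
	struct assignment steady[4];
	struct assignment transient[4];
};

static struct assignment lookup(const struct cfg *c, int i)
{
	if (c->mode == CFG_TRANSIENT)
		return c->transient[i];
	return c->steady[i]; /* CFG_STEADY */
}

int main(void)
{
	struct cfg c = { .mode = CFG_TRANSIENT };

	c.steady[0] = (struct assignment){ 1, 0 };    /* stale committed entry */
	c.transient[0] = (struct assignment){ 1, 2 }; /* snapshot from assign */
	printf("eng_id seen mid-commit: %d\n", lookup(&c, 0).eng_id);
	return 0;
}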
@@ -119,7 +169,7 @@ static enum engine_id find_first_avail_link_enc(
 	int i;
 	for (i = 0; i < ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
-		eng_id = state->res_ctx.link_enc_avail[i];
+		eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i];
 		if (eng_id != ENGINE_ID_UNKNOWN)
 			break;
 	}
@@ -127,30 +177,51 @@ static enum engine_id find_first_avail_link_enc(
 	return eng_id;
 }
-/* Return stream using DIG link encoder resource. NULL if unused. */
-static struct dc_stream_state *get_stream_using_link_enc(
-		struct dc_state *state,
-		enum engine_id eng_id)
-{
-	struct dc_stream_state *stream = NULL;
-	int stream_idx = -1;
-	int i;
-	for (i = 0; i < state->stream_count; i++) {
-		struct link_enc_assignment assignment = state->res_ctx.link_enc_assignments[i];
-		if (assignment.valid && (assignment.eng_id == eng_id)) {
-			stream_idx = i;
-			break;
-		}
-	}
-	if (stream_idx != -1)
-		stream = state->streams[stream_idx];
-	else
-		dm_output_to_console("%s: No stream using DIG(%d).\n", __func__, eng_id);
-	return stream;
-}
+static bool is_avail_link_enc(struct dc_state *state, enum engine_id eng_id)
+{
+	bool is_avail = false;
+	int eng_idx = eng_id - ENGINE_ID_DIGA;
+
+	if (eng_id != ENGINE_ID_UNKNOWN && state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] != ENGINE_ID_UNKNOWN)
+		is_avail = true;
+
+	return is_avail;
+}
+
+/* Test for display_endpoint_id equality. */
+static bool are_ep_ids_equal(struct display_endpoint_id *lhs, struct display_endpoint_id *rhs)
+{
+	bool are_equal = false;
+
+	if (lhs->link_id.id == rhs->link_id.id &&
+			lhs->link_id.enum_id == rhs->link_id.enum_id &&
+			lhs->link_id.type == rhs->link_id.type &&
+			lhs->ep_type == rhs->ep_type)
+		are_equal = true;
+
+	return are_equal;
+}
+
+static struct link_encoder *get_link_enc_used_by_link(
+		struct dc_state *state,
+		const struct dc_link *link)
+{
+	struct link_encoder *link_enc = NULL;
+	struct display_endpoint_id ep_id;
+	int i;
+
+	ep_id = (struct display_endpoint_id) {
+		.link_id = link->link_id,
+		.ep_type = link->ep_type};
+
+	for (i = 0; i < state->stream_count; i++) {
+		struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
+
+		if (assignment.valid == true && are_ep_ids_equal(&assignment.ep_id, &ep_id))
+			link_enc = link->dc->res_pool->link_encoders[assignment.eng_id - ENGINE_ID_DIGA];
+	}
+
+	return link_enc;
+}
 void link_enc_cfg_init(
@@ -161,10 +232,12 @@ void link_enc_cfg_init(
 	for (i = 0; i < dc->res_pool->res_cap->num_dig_link_enc; i++) {
 		if (dc->res_pool->link_encoders[i])
-			state->res_ctx.link_enc_avail[i] = (enum engine_id) i;
+			state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i] = (enum engine_id) i;
 		else
-			state->res_ctx.link_enc_avail[i] = ENGINE_ID_UNKNOWN;
+			state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i] = ENGINE_ID_UNKNOWN;
 	}
+
+	state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
 }
 void link_enc_cfg_link_encs_assign(
@@ -175,11 +248,17 @@ void link_enc_cfg_link_encs_assign(
 {
 	enum engine_id eng_id = ENGINE_ID_UNKNOWN;
 	int i;
+	int j;
+
+	ASSERT(state->stream_count == stream_count);
+
 	/* Release DIG link encoder resources before running assignment algorithm. */
 	for (i = 0; i < stream_count; i++)
 		dc->res_pool->funcs->link_enc_unassign(state, streams[i]);
+
+	for (i = 0; i < MAX_PIPES; i++)
+		ASSERT(state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid == false);
+
 	/* (a) Assign DIG link encoders to physical (unmappable) endpoints first. */
 	for (i = 0; i < stream_count; i++) {
 		struct dc_stream_state *stream = streams[i];
@@ -191,26 +270,82 @@ void link_enc_cfg_link_encs_assign(
 		/* Physical endpoints have a fixed mapping to DIG link encoders. */
 		if (!stream->link->is_dig_mapping_flexible) {
 			eng_id = stream->link->eng_id;
-			update_link_enc_assignment(state, stream, eng_id, true);
+			add_link_enc_assignment(state, stream, eng_id);
 		}
 	}
-	/* (b) Then assign encoders to mappable endpoints. */
+	/* (b) Retain previous assignments for mappable endpoints if encoders still available. */
+	eng_id = ENGINE_ID_UNKNOWN;
+
+	if (state != dc->current_state) {
+		struct dc_state *prev_state = dc->current_state;
+
+		for (i = 0; i < stream_count; i++) {
+			struct dc_stream_state *stream = state->streams[i];
+
+			/* Skip stream if not supported by DIG link encoder. */
+			if (!is_dig_link_enc_stream(stream))
+				continue;
+
+			if (!stream->link->is_dig_mapping_flexible)
+				continue;
+
+			for (j = 0; j < prev_state->stream_count; j++) {
+				struct dc_stream_state *prev_stream = prev_state->streams[j];
+
+				if (stream == prev_stream && stream->link == prev_stream->link &&
+						prev_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j].valid) {
+					eng_id = prev_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j].eng_id;
+					if (is_avail_link_enc(state, eng_id))
+						add_link_enc_assignment(state, stream, eng_id);
+				}
+			}
+		}
+	}
+
+	/* (c) Then assign encoders to remaining mappable endpoints. */
 	eng_id = ENGINE_ID_UNKNOWN;
 	for (i = 0; i < stream_count; i++) {
 		struct dc_stream_state *stream = streams[i];
 		/* Skip stream if not supported by DIG link encoder. */
-		if (!is_dig_link_enc_stream(stream))
+		if (!is_dig_link_enc_stream(stream)) {
+			ASSERT(stream->link->is_dig_mapping_flexible != true);
 			continue;
+		}
 		/* Mappable endpoints have a flexible mapping to DIG link encoders. */
 		if (stream->link->is_dig_mapping_flexible) {
-			eng_id = find_first_avail_link_enc(stream->ctx, state);
-			update_link_enc_assignment(state, stream, eng_id, true);
+			struct link_encoder *link_enc = NULL;
+
+			/* Skip if encoder assignment retained in step (b) above. */
+			if (stream->link_enc)
+				continue;
+
+			/* For MST, multiple streams will share the same link / display
+			 * endpoint. These streams should use the same link encoder
+			 * assigned to that endpoint.
+			 */
+			link_enc = get_link_enc_used_by_link(state, stream->link);
+			if (link_enc == NULL)
+				eng_id = find_first_avail_link_enc(stream->ctx, state);
+			else
+				eng_id = link_enc->preferred_engine;
+
+			add_link_enc_assignment(state, stream, eng_id);
 		}
 	}
+
+	link_enc_cfg_validate(dc, state);
+
+	/* Update transient assignments. */
+	for (i = 0; i < MAX_PIPES; i++) {
+		dc->current_state->res_ctx.link_enc_cfg_ctx.transient_assignments[i] =
+			state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
+	}
+
+	/* Current state mode will be set to steady once this state committed. */
+	state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
 }
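The (a)/(b)/(c) ordering above reads as: fixed encoders for physical endpoints first, then re-use of each flexible endpoint's previous encoder when it is still free, then first-available fallback. A toy model of that priority order (plain arrays and ints, not the dc structures):

#include <stdio.h>

#define NUM_ENC 4
#define NONE (-1)

struct strm { int fixed_enc; int prev_enc; int enc; };

static void assign(struct strm *s, int n)
{
	int owner[NUM_ENC], i, e;

	for (e = 0; e < NUM_ENC; e++)
		owner[e] = NONE;

	for (i = 0; i < n; i++) {            /* (a) physical endpoints */
		if (s[i].fixed_enc != NONE) {
			owner[s[i].fixed_enc] = i;
			s[i].enc = s[i].fixed_enc;
		}
	}
	for (i = 0; i < n; i++) {            /* (b) retain previous if free */
		if (s[i].fixed_enc == NONE && s[i].prev_enc != NONE &&
				owner[s[i].prev_enc] == NONE) {
			owner[s[i].prev_enc] = i;
			s[i].enc = s[i].prev_enc;
		}
	}
	for (i = 0; i < n; i++) {            /* (c) first available */
		if (s[i].enc != NONE)
			continue;
		for (e = 0; e < NUM_ENC; e++) {
			if (owner[e] == NONE) {
				owner[e] = i;
				s[i].enc = e;
				break;
			}
		}
	}
}

int main(void)
{
	/* Stream 0 is a physical endpoint wired to DIGB; stream 1 previously
	 * used DIGB, so it falls through to the first free encoder, DIGA. */
	struct strm s[2] = { { 1, NONE, NONE }, { NONE, 1, NONE } };

	assign(s, 2);
	printf("stream0 -> DIG%c, stream1 -> DIG%c\n", 'A' + s[0].enc, 'A' + s[1].enc);
	return 0;
}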
 void link_enc_cfg_link_enc_unassign(
@@ -226,16 +361,16 @@ void link_enc_cfg_link_enc_unassign(
 	if (stream->link_enc)
 		eng_id = stream->link_enc->preferred_engine;
-	update_link_enc_assignment(state, stream, eng_id, false);
+	remove_link_enc_assignment(state, stream, eng_id);
 }
 bool link_enc_cfg_is_transmitter_mappable(
-		struct dc_state *state,
+		struct dc *dc,
 		struct link_encoder *link_enc)
 {
 	bool is_mappable = false;
 	enum engine_id eng_id = link_enc->preferred_engine;
-	struct dc_stream_state *stream = get_stream_using_link_enc(state, eng_id);
+	struct dc_stream_state *stream = link_enc_cfg_get_stream_using_link_enc(dc, eng_id);
 	if (stream)
 		is_mappable = stream->link->is_dig_mapping_flexible;
@@ -243,73 +378,214 @@ bool link_enc_cfg_is_transmitter_mappable(
 	return is_mappable;
 }
struct dc_link *link_enc_cfg_get_link_using_link_enc( struct dc_stream_state *link_enc_cfg_get_stream_using_link_enc(
struct dc_state *state, struct dc *dc,
enum engine_id eng_id) enum engine_id eng_id)
{ {
struct dc_link *link = NULL; struct dc_stream_state *stream = NULL;
int stream_idx = -1;
int i; int i;
for (i = 0; i < state->stream_count; i++) { for (i = 0; i < MAX_PIPES; i++) {
struct link_enc_assignment assignment = state->res_ctx.link_enc_assignments[i]; struct link_enc_assignment assignment = get_assignment(dc, i);
if (assignment.valid && (assignment.eng_id == eng_id)) { if ((assignment.valid == true) && (assignment.eng_id == eng_id)) {
stream_idx = i; stream = assignment.stream;
break; break;
} }
} }
if (stream_idx != -1) return stream;
link = state->streams[stream_idx]->link; }
else
dm_output_to_console("%s: No link using DIG(%d).\n", __func__, eng_id);
struct dc_link *link_enc_cfg_get_link_using_link_enc(
struct dc *dc,
enum engine_id eng_id)
{
struct dc_link *link = NULL;
struct dc_stream_state *stream = NULL;
stream = link_enc_cfg_get_stream_using_link_enc(dc, eng_id);
if (stream)
link = stream->link;
// dm_output_to_console("%s: No link using DIG(%d).\n", __func__, eng_id);
return link; return link;
} }
struct link_encoder *link_enc_cfg_get_link_enc_used_by_link( struct link_encoder *link_enc_cfg_get_link_enc_used_by_link(
struct dc_state *state, struct dc *dc,
const struct dc_link *link) const struct dc_link *link)
{ {
struct link_encoder *link_enc = NULL; struct link_encoder *link_enc = NULL;
struct display_endpoint_id ep_id; struct display_endpoint_id ep_id;
int stream_idx = -1;
int i; int i;
ep_id = (struct display_endpoint_id) { ep_id = (struct display_endpoint_id) {
.link_id = link->link_id, .link_id = link->link_id,
.ep_type = link->ep_type}; .ep_type = link->ep_type};
for (i = 0; i < state->stream_count; i++) { for (i = 0; i < MAX_PIPES; i++) {
struct link_enc_assignment assignment = state->res_ctx.link_enc_assignments[i]; struct link_enc_assignment assignment = get_assignment(dc, i);
if (assignment.valid && if (assignment.valid == true && are_ep_ids_equal(&assignment.ep_id, &ep_id)) {
assignment.ep_id.link_id.id == ep_id.link_id.id && link_enc = link->dc->res_pool->link_encoders[assignment.eng_id - ENGINE_ID_DIGA];
assignment.ep_id.link_id.enum_id == ep_id.link_id.enum_id &&
assignment.ep_id.link_id.type == ep_id.link_id.type &&
assignment.ep_id.ep_type == ep_id.ep_type) {
stream_idx = i;
break; break;
} }
} }
if (stream_idx != -1)
link_enc = state->streams[stream_idx]->link_enc;
return link_enc; return link_enc;
} }
struct link_encoder *link_enc_cfg_get_next_avail_link_enc( struct link_encoder *link_enc_cfg_get_next_avail_link_enc(struct dc *dc)
const struct dc *dc,
const struct dc_state *state)
{ {
struct link_encoder *link_enc = NULL; struct link_encoder *link_enc = NULL;
enum engine_id eng_id; enum engine_id encs_assigned[MAX_DIG_LINK_ENCODERS];
int i;
eng_id = find_first_avail_link_enc(dc->ctx, state); for (i = 0; i < MAX_DIG_LINK_ENCODERS; i++)
if (eng_id != ENGINE_ID_UNKNOWN) encs_assigned[i] = ENGINE_ID_UNKNOWN;
link_enc = dc->res_pool->link_encoders[eng_id - ENGINE_ID_DIGA];
/* Add assigned encoders to list. */
for (i = 0; i < MAX_PIPES; i++) {
struct link_enc_assignment assignment = get_assignment(dc, i);
if (assignment.valid)
encs_assigned[assignment.eng_id - ENGINE_ID_DIGA] = assignment.eng_id;
}
for (i = 0; i < dc->res_pool->res_cap->num_dig_link_enc; i++) {
if (encs_assigned[i] == ENGINE_ID_UNKNOWN) {
link_enc = dc->res_pool->link_encoders[i];
break;
}
}
return link_enc; return link_enc;
} }
struct link_encoder *link_enc_cfg_get_link_enc_used_by_stream(
struct dc *dc,
const struct dc_stream_state *stream)
{
struct link_encoder *link_enc;
link_enc = link_enc_cfg_get_link_enc_used_by_link(dc, stream->link);
return link_enc;
}
bool link_enc_cfg_is_link_enc_avail(struct dc *dc, enum engine_id eng_id)
{
bool is_avail = true;
int i;
/* Add assigned encoders to list. */
for (i = 0; i < MAX_PIPES; i++) {
struct link_enc_assignment assignment = get_assignment(dc, i);
if (assignment.valid && assignment.eng_id == eng_id) {
is_avail = false;
break;
}
}
return is_avail;
}
bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state)
{
bool is_valid = false;
bool valid_entries = true;
bool valid_stream_ptrs = true;
bool valid_uniqueness = true;
bool valid_avail = true;
bool valid_streams = true;
int i, j;
uint8_t valid_count = 0;
uint8_t dig_stream_count = 0;
int matching_stream_ptrs = 0;
int eng_ids_per_ep_id[MAX_PIPES] = {0};
/* (1) No. valid entries same as stream count. */
for (i = 0; i < MAX_PIPES; i++) {
struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
if (assignment.valid)
valid_count++;
if (is_dig_link_enc_stream(state->streams[i]))
dig_stream_count++;
}
if (valid_count != dig_stream_count)
valid_entries = false;
/* (2) Matching stream ptrs. */
for (i = 0; i < MAX_PIPES; i++) {
struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
if (assignment.valid) {
if (assignment.stream == state->streams[i])
matching_stream_ptrs++;
else
valid_stream_ptrs = false;
}
}
/* (3) Each endpoint assigned unique encoder. */
for (i = 0; i < MAX_PIPES; i++) {
struct link_enc_assignment assignment_i = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
if (assignment_i.valid) {
struct display_endpoint_id ep_id_i = assignment_i.ep_id;
eng_ids_per_ep_id[i]++;
for (j = 0; j < MAX_PIPES; j++) {
struct link_enc_assignment assignment_j =
state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j];
if (j == i)
continue;
if (assignment_j.valid) {
struct display_endpoint_id ep_id_j = assignment_j.ep_id;
if (are_ep_ids_equal(&ep_id_i, &ep_id_j) &&
assignment_i.eng_id != assignment_j.eng_id) {
valid_uniqueness = false;
eng_ids_per_ep_id[i]++;
}
}
}
}
}
/* (4) Assigned encoders not in available pool. */
for (i = 0; i < MAX_PIPES; i++) {
struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
if (assignment.valid) {
for (j = 0; j < dc->res_pool->res_cap->num_dig_link_enc; j++) {
if (state->res_ctx.link_enc_cfg_ctx.link_enc_avail[j] == assignment.eng_id) {
valid_avail = false;
break;
}
}
}
}
/* (5) All streams have valid link encoders. */
for (i = 0; i < state->stream_count; i++) {
struct dc_stream_state *stream = state->streams[i];
if (is_dig_link_enc_stream(stream) && stream->link_enc == NULL) {
valid_streams = false;
break;
}
}
is_valid = valid_entries && valid_stream_ptrs && valid_uniqueness && valid_avail && valid_streams;
ASSERT(is_valid);
return is_valid;
}
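Condition (3) in link_enc_cfg_validate() is a pairwise scan: the same display endpoint must never appear in the table with two different engine ids. A compact standalone sketch of that invariant check (simplified int endpoint/engine pairs stand in for the driver's structs):

#include <stdbool.h>
#include <stdio.h>

struct assignment {
	bool valid;
	int ep_id;   /* stands in for struct display_endpoint_id */
	int eng_id;
};

/* Returns false if one endpoint is mapped to two different engines. */
static bool endpoints_unique(const struct assignment *a, int n)
{
	for (int i = 0; i < n; i++) {
		if (!a[i].valid)
			continue;
		for (int j = i + 1; j < n; j++) {
			if (a[j].valid && a[j].ep_id == a[i].ep_id &&
			    a[j].eng_id != a[i].eng_id)
				return false;
		}
	}
	return true;
}

int main(void)
{
	struct assignment ok[]  = { {true, 1, 0}, {true, 2, 1} };
	struct assignment bad[] = { {true, 1, 0}, {true, 1, 1} };

	printf("%d %d\n", endpoints_unique(ok, 2),
	       endpoints_unique(bad, 2)); /* prints 1 0 */
	return 0;
}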


@@ -17,6 +17,7 @@
#include "link_enc_cfg.h"
#include "clk_mgr.h"
#include "inc/link_dpcd.h"
#include "dccg.h"
static uint8_t convert_to_count(uint8_t lttpr_repeater_count)
{
@@ -61,6 +62,13 @@ void dp_receiver_power_ctrl(struct dc_link *link, bool on)
sizeof(state));
}
void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode)
{
if (link != NULL && link->dc->debug.enable_driver_sequence_debug)
core_link_write_dpcd(link, DP_SOURCE_SEQUENCE,
&dp_test_mode, sizeof(dp_test_mode));
}
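dp_source_sequence_trace() writes a one-byte step marker to the vendor-specific DP_SOURCE_SEQUENCE DPCD address whenever the debug flag is set, so an analyzer or test harness can see which point of the source sequence last executed. As a sketch, a user-space harness could read such a byte back through the DRM DP AUX character device; both the device path and the 0x30c offset below are assumptions for illustration, not the real register address:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* /dev/drm_dp_aux0 exposes raw DPCD access; 0x30c is an assumed
	 * offset standing in for DP_SOURCE_SEQUENCE in this sketch.
	 */
	int fd = open("/dev/drm_dp_aux0", O_RDONLY);
	uint8_t marker;

	if (fd < 0)
		return 1;
	if (pread(fd, &marker, 1, 0x30c) == 1)
		printf("last source sequence marker: 0x%02x\n", marker);
	close(fd);
	return 0;
}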
void dp_enable_link_phy(
struct dc_link *link,
enum signal_type signal,
@@ -79,7 +87,7 @@ void dp_enable_link_phy(
/* Link should always be assigned encoder when en-/disabling. */
if (link->is_dig_mapping_flexible && dc->res_pool->funcs->link_encs_assign)
link_enc = link_enc_cfg_get_link_enc_used_by_link(dc, link);
else
link_enc = link->link_enc;
ASSERT(link_enc);
@@ -111,12 +119,37 @@ void dp_enable_link_phy(
link->cur_link_settings = *link_settings;
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
/* TODO - DP2.0 HW: notify link rate change here */
} else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
if (dc->clk_mgr->funcs->notify_link_rate_change)
dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
}
#else
if (dc->clk_mgr->funcs->notify_link_rate_change)
dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
#endif
if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->lock_phy(dmcu);
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
enable_dp_hpo_output(link, link_settings);
} else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
if (dc_is_dp_sst_signal(signal)) {
link_enc->funcs->enable_dp_output(
link_enc,
link_settings,
clock_source);
} else {
link_enc->funcs->enable_dp_mst_output(
link_enc,
link_settings,
clock_source);
}
}
#else
if (dc_is_dp_sst_signal(signal)) {
link_enc->funcs->enable_dp_output(
link_enc,
@@ -128,10 +161,11 @@ void dp_enable_link_phy(
link_settings,
clock_source);
}
#endif
if (dmcu != NULL && dmcu->funcs->unlock_phy)
dmcu->funcs->unlock_phy(dmcu);
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
dp_receiver_power_ctrl(link, true);
}
@@ -206,11 +240,14 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
{
struct dc *dc = link->ctx->dc;
struct dmcu *dmcu = dc->res_pool->dmcu;
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct hpo_dp_link_encoder *hpo_link_enc = link->hpo_dp_link_enc;
#endif
struct link_encoder *link_enc;
/* Link should always be assigned encoder when en-/disabling. */
if (link->is_dig_mapping_flexible && dc->res_pool->funcs->link_encs_assign)
link_enc = link_enc_cfg_get_link_enc_used_by_link(dc, link);
else
link_enc = link->link_enc;
ASSERT(link_enc);
@@ -221,18 +258,34 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
if (signal == SIGNAL_TYPE_EDP) {
if (link->dc->hwss.edp_backlight_control)
link->dc->hwss.edp_backlight_control(link, false);
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING)
disable_dp_hpo_output(link, signal);
else
link_enc->funcs->disable_output(link_enc, signal);
#else
link_enc->funcs->disable_output(link_enc, signal);
#endif
link->dc->hwss.edp_power_control(link, false);
} else {
if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->lock_phy(dmcu);
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING &&
hpo_link_enc)
disable_dp_hpo_output(link, signal);
else
link_enc->funcs->disable_output(link_enc, signal);
#else
link_enc->funcs->disable_output(link_enc, signal);
#endif
if (dmcu != NULL && dmcu->funcs->unlock_phy)
dmcu->funcs->unlock_phy(dmcu);
}
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
/* Clear current link setting.*/
memset(&link->cur_link_settings, 0,
sizeof(link->cur_link_settings));
@@ -273,6 +326,14 @@ bool dp_set_hw_training_pattern(
case DP_TRAINING_PATTERN_SEQUENCE_4:
test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
case DP_128b_132b_TPS1:
test_pattern = DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE;
break;
case DP_128b_132b_TPS2:
test_pattern = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE;
break;
#endif
default:
break;
}
@@ -282,6 +343,10 @@ bool dp_set_hw_training_pattern(
return true;
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
#define DC_LOGGER \
link->ctx->logger
#endif
void dp_set_hw_lane_settings(
struct dc_link *link,
const struct link_training_settings *link_settings,
@@ -293,7 +358,20 @@ void dp_set_hw_lane_settings(
return;
/* call Encoder to set lane settings */
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dp_get_link_encoding_format(&link_settings->link_settings) ==
DP_128b_132b_ENCODING) {
link->hpo_dp_link_enc->funcs->set_ffe(
link->hpo_dp_link_enc,
&link_settings->link_settings,
link_settings->lane_settings[0].FFE_PRESET.raw);
} else if (dp_get_link_encoding_format(&link_settings->link_settings)
== DP_8b_10b_ENCODING) {
encoder->funcs->dp_set_lane_settings(encoder, link_settings);
}
#else
encoder->funcs->dp_set_lane_settings(encoder, link_settings);
#endif
}
void dp_set_hw_test_pattern(
@@ -304,13 +382,16 @@ void dp_set_hw_test_pattern(
{
struct encoder_set_dp_phy_pattern_param pattern_param = {0};
struct link_encoder *encoder;
#if defined(CONFIG_DRM_AMD_DC_DCN)
enum dp_link_encoding link_encoding_format = dp_get_link_encoding_format(&link->cur_link_settings);
#endif
/* Access link encoder based on whether it is statically
* or dynamically assigned to a link.
*/
if (link->is_dig_mapping_flexible &&
link->dc->res_pool->funcs->link_encs_assign)
encoder = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
else
encoder = link->link_enc;
@@ -319,8 +400,28 @@ void dp_set_hw_test_pattern(
pattern_param.custom_pattern_size = custom_pattern_size;
pattern_param.dp_panel_mode = dp_get_panel_mode(link);
#if defined(CONFIG_DRM_AMD_DC_DCN)
switch (link_encoding_format) {
case DP_128b_132b_ENCODING:
link->hpo_dp_link_enc->funcs->set_link_test_pattern(
link->hpo_dp_link_enc, &pattern_param);
break;
case DP_8b_10b_ENCODING:
ASSERT(encoder);
encoder->funcs->dp_set_phy_pattern(encoder, &pattern_param);
break;
default:
DC_LOG_ERROR("%s: Unknown link encoding format.", __func__);
break;
}
#else
encoder->funcs->dp_set_phy_pattern(encoder, &pattern_param);
#endif
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
#undef DC_LOGGER
#endif
void dp_retrain_link_dp_test(struct dc_link *link,
struct dc_link_settings *link_setting,
@@ -338,7 +439,7 @@ void dp_retrain_link_dp_test(struct dc_link *link,
pipes[i].stream->link == link) {
udelay(100);
pipes[i].stream_res.stream_enc->funcs->dp_blank(link,
pipes[i].stream_res.stream_enc);
/* disable any test pattern that might be active */
@@ -351,9 +452,10 @@ void dp_retrain_link_dp_test(struct dc_link *link,
if ((&pipes[i])->stream_res.audio && !link->dc->debug.az_endpoint_mute_only)
(&pipes[i])->stream_res.audio->funcs->az_disable((&pipes[i])->stream_res.audio);
if (link->link_enc)
link->link_enc->funcs->disable_output(
link->link_enc,
SIGNAL_TYPE_DISPLAY_PORT);
/* Clear current link setting. */
memset(&link->cur_link_settings, 0,
@@ -468,7 +570,12 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;
/* Enable DSC in encoder */
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)
&& !is_dp_128b_132b_signal(pipe_ctx)) {
#else
if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
#endif
DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id);
dsc_optc_config_log(dsc, &dsc_optc_cfg);
pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc,
@@ -495,13 +602,22 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
/* disable DSC in stream encoder */
if (dc_is_dp_signal(stream->signal)) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (is_dp_128b_132b_signal(pipe_ctx))
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
pipe_ctx->stream_res.hpo_dp_stream_enc,
false,
NULL,
true);
else
#endif
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
pipe_ctx->stream_res.stream_enc,
OPTC_DSC_DISABLED, 0, 0);
pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
pipe_ctx->stream_res.stream_enc, false, NULL, true);
}
}
/* disable DSC block */
@@ -535,7 +651,16 @@ out:
return result;
}
/*
* For the dynamic bpp change case, DSC is programmed with MASTER_UPDATE_LOCK enabled;
* the PPS info packet update therefore needs to use a frame update instead of an
* immediate update. The parameter immediate_update was added for this purpose.
* The decision to use a frame update is hard-coded in dp_update_dsc_config(),
* which is the only place where a "false" is passed in for param immediate_update.
*
* immediate_update is only applicable when DSC is enabled.
*/
bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
struct dc_stream_state *stream = pipe_ctx->stream;
@@ -562,16 +687,35 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]);
if (dc_is_dp_signal(stream->signal)) {
DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id);
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (is_dp_128b_132b_signal(pipe_ctx))
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
pipe_ctx->stream_res.hpo_dp_stream_enc,
true,
&dsc_packed_pps[0],
immediate_update);
else
#endif
pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
pipe_ctx->stream_res.stream_enc,
true,
&dsc_packed_pps[0],
immediate_update);
}
} else {
/* disable DSC PPS in stream encoder */
if (dc_is_dp_signal(stream->signal)) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (is_dp_128b_132b_signal(pipe_ctx))
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
pipe_ctx->stream_res.hpo_dp_stream_enc,
false,
NULL,
true);
else
#endif
pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
pipe_ctx->stream_res.stream_enc, false, NULL, true);
}
}
@@ -589,7 +733,171 @@ bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx)
return false;
dp_set_dsc_on_stream(pipe_ctx, true);
dp_set_dsc_pps_sdp(pipe_ctx, true, false);
return true;
}
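The comment above dp_set_dsc_pps_sdp() explains the only reason a caller passes immediate_update = false: dp_update_dsc_config() reprograms DSC under MASTER_UPDATE_LOCK, so the PPS packet must latch on the next frame boundary rather than right away. A hedged illustration of the two call shapes (pipe_ctx is assumed to be a fully constructed pipe context):

/* Enable path, outside of MASTER_UPDATE_LOCK: latch the PPS SDP immediately. */
dp_set_dsc_pps_sdp(pipe_ctx, true, true);

/* Dynamic bpp change path (dp_update_dsc_config): DSC registers are
 * double-buffered under the lock, so the PPS update must be frame-synced.
 */
dp_set_dsc_pps_sdp(pipe_ctx, true, false);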
#if defined(CONFIG_DRM_AMD_DC_DCN)
#undef DC_LOGGER
#define DC_LOGGER \
link->ctx->logger
static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
{
switch (link->link_enc->transmitter) {
case TRANSMITTER_UNIPHY_A:
return PHYD32CLKA;
case TRANSMITTER_UNIPHY_B:
return PHYD32CLKB;
case TRANSMITTER_UNIPHY_C:
return PHYD32CLKC;
case TRANSMITTER_UNIPHY_D:
return PHYD32CLKD;
case TRANSMITTER_UNIPHY_E:
return PHYD32CLKE;
default:
return PHYD32CLKA;
}
}
void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *link_settings)
{
const struct dc *dc = link->dc;
enum phyd32clk_clock_source phyd32clk;
/* Enable PHY PLL at target bit rate
* UHBR10 = 10Gbps (SYMCLK32 = 312.5MHz)
* UHBR13.5 = 13.5Gbps (SYMCLK32 = 421.875MHz)
* UHBR20 = 20Gbps (SYMCLK32 = 625MHz)
*/
if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
switch (link_settings->link_rate) {
case LINK_RATE_UHBR10:
dm_set_phyd32clk(dc->ctx, 312500);
break;
case LINK_RATE_UHBR13_5:
dm_set_phyd32clk(dc->ctx, 421875);
break;
case LINK_RATE_UHBR20:
dm_set_phyd32clk(dc->ctx, 625000);
break;
default:
return;
}
} else {
/* DP2.0 HW: call transmitter control to enable PHY */
link->hpo_dp_link_enc->funcs->enable_link_phy(
link->hpo_dp_link_enc,
link_settings,
link->link_enc->transmitter);
}
/* DCCG muxing and DTBCLK DTO */
if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
dc->res_pool->dccg->funcs->set_physymclk(
dc->res_pool->dccg,
link->link_enc_hw_inst,
PHYSYMCLK_FORCE_SRC_PHYD32CLK,
true);
phyd32clk = get_phyd32clk_src(link);
dc->res_pool->dccg->funcs->enable_symclk32_le(
dc->res_pool->dccg,
link->hpo_dp_link_enc->inst,
phyd32clk);
link->hpo_dp_link_enc->funcs->link_enable(
link->hpo_dp_link_enc,
link_settings->lane_count);
}
}
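The SYMCLK32 frequencies in the comment above follow directly from the UHBR bit rates: the 32-bit symbol clock is the per-lane link rate divided by 32 (10 Gbps / 32 = 312.5 MHz, 13.5 Gbps / 32 = 421.875 MHz, 20 Gbps / 32 = 625 MHz), which is why the fixed kHz values passed to dm_set_phyd32clk() look the way they do. A standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	/* Per-lane UHBR link rates in kbps. */
	const long rates_kbps[] = {10000000, 13500000, 20000000};

	for (int i = 0; i < 3; i++) {
		/* SYMCLK32 in kHz = link bit rate / 32 symbols. */
		long symclk32_khz = rates_kbps[i] / 32;

		printf("%ld kbps -> SYMCLK32 %ld kHz\n",
		       rates_kbps[i], symclk32_khz);
	}
	return 0;   /* prints 312500, 421875, 625000 */
}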
void disable_dp_hpo_output(struct dc_link *link, enum signal_type signal)
{
const struct dc *dc = link->dc;
link->hpo_dp_link_enc->funcs->link_disable(link->hpo_dp_link_enc);
if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
dc->res_pool->dccg->funcs->disable_symclk32_le(
dc->res_pool->dccg,
link->hpo_dp_link_enc->inst);
dc->res_pool->dccg->funcs->set_physymclk(
dc->res_pool->dccg,
link->link_enc_hw_inst,
PHYSYMCLK_FORCE_SRC_SYMCLK,
false);
dm_set_phyd32clk(dc->ctx, 0);
} else {
/* DP2.0 HW: call transmitter control to disable PHY */
link->hpo_dp_link_enc->funcs->disable_link_phy(
link->hpo_dp_link_enc,
signal);
}
}
void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc *dc = pipe_ctx->stream->ctx->dc;
struct pipe_ctx *odm_pipe;
int odm_combine_num_segments = 1;
enum phyd32clk_clock_source phyd32clk;
if (enable) {
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
odm_combine_num_segments++;
dc->res_pool->dccg->funcs->set_dpstreamclk(
dc->res_pool->dccg,
DTBCLK0,
pipe_ctx->stream_res.tg->inst);
phyd32clk = get_phyd32clk_src(stream->link);
dc->res_pool->dccg->funcs->enable_symclk32_se(
dc->res_pool->dccg,
pipe_ctx->stream_res.hpo_dp_stream_enc->inst,
phyd32clk);
dc->res_pool->dccg->funcs->set_dtbclk_dto(
dc->res_pool->dccg,
pipe_ctx->stream_res.tg->inst,
stream->phy_pix_clk,
odm_combine_num_segments,
&stream->timing);
} else {
dc->res_pool->dccg->funcs->set_dtbclk_dto(
dc->res_pool->dccg,
pipe_ctx->stream_res.tg->inst,
0,
0,
&stream->timing);
dc->res_pool->dccg->funcs->disable_symclk32_se(
dc->res_pool->dccg,
pipe_ctx->stream_res.hpo_dp_stream_enc->inst);
dc->res_pool->dccg->funcs->set_dpstreamclk(
dc->res_pool->dccg,
REFCLK,
pipe_ctx->stream_res.tg->inst);
}
}
void reset_dp_hpo_stream_encoders_for_link(struct dc_link *link)
{
const struct dc *dc = link->dc;
struct dc_state *state = dc->current_state;
uint8_t i;
for (i = 0; i < MAX_PIPES; i++) {
if (state->res_ctx.pipe_ctx[i].stream_res.hpo_dp_stream_enc &&
state->res_ctx.pipe_ctx[i].stream &&
state->res_ctx.pipe_ctx[i].stream->link == link &&
!state->res_ctx.pipe_ctx[i].stream->dpms_off) {
setup_dp_hpo_stream(&state->res_ctx.pipe_ctx[i], false);
}
}
}
#undef DC_LOGGER
#endif


@@ -41,6 +41,8 @@
#include "set_mode_types.h"
#include "virtual/virtual_stream_encoder.h"
#include "dpcd_defs.h"
#include "link_enc_cfg.h"
#include "dc_link_dp.h"
#if defined(CONFIG_DRM_AMD_DC_SI)
#include "dce60/dce60_resource.h"
@@ -346,6 +348,29 @@ bool resource_construct(
}
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
pool->hpo_dp_stream_enc_count = 0;
if (create_funcs->create_hpo_dp_stream_encoder) {
for (i = 0; i < caps->num_hpo_dp_stream_encoder; i++) {
pool->hpo_dp_stream_enc[i] = create_funcs->create_hpo_dp_stream_encoder(i+ENGINE_ID_HPO_DP_0, ctx);
if (pool->hpo_dp_stream_enc[i] == NULL)
DC_ERR("DC: failed to create HPO DP stream encoder!\n");
pool->hpo_dp_stream_enc_count++;
}
}
pool->hpo_dp_link_enc_count = 0;
if (create_funcs->create_hpo_dp_link_encoder) {
for (i = 0; i < caps->num_hpo_dp_link_encoder; i++) {
pool->hpo_dp_link_enc[i] = create_funcs->create_hpo_dp_link_encoder(i, ctx);
if (pool->hpo_dp_link_enc[i] == NULL)
DC_ERR("DC: failed to create HPO DP link encoder!\n");
pool->hpo_dp_link_enc_count++;
}
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
for (i = 0; i < caps->num_mpc_3dlut; i++) {
pool->mpc_lut[i] = dc_create_3dlut_func();
@@ -1665,6 +1690,22 @@ static void update_stream_engine_usage(
}
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void update_hpo_dp_stream_engine_usage(
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct hpo_dp_stream_encoder *hpo_dp_stream_enc,
bool acquired)
{
int i;
for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
if (pool->hpo_dp_stream_enc[i] == hpo_dp_stream_enc)
res_ctx->is_hpo_dp_stream_enc_acquired[i] = acquired;
}
}
#endif
/* TODO: release audio object */
void update_audio_usage(
struct resource_context *res_ctx,
@@ -1709,6 +1750,26 @@ static int acquire_first_free_pipe(
return -1;
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
static struct hpo_dp_stream_encoder *find_first_free_match_hpo_dp_stream_enc_for_link(
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct dc_stream_state *stream)
{
int i;
for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
if (!res_ctx->is_hpo_dp_stream_enc_acquired[i] &&
pool->hpo_dp_stream_enc[i]) {
return pool->hpo_dp_stream_enc[i];
}
}
return NULL;
}
#endif
static struct audio *find_first_free_audio(
struct resource_context *res_ctx,
const struct resource_pool *pool,
@@ -1799,6 +1860,15 @@ enum dc_status dc_remove_stream_from_ctx(
if (dc->res_pool->funcs->link_enc_unassign)
dc->res_pool->funcs->link_enc_unassign(new_ctx, del_pipe->stream);
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (is_dp_128b_132b_signal(del_pipe)) {
update_hpo_dp_stream_engine_usage(
&new_ctx->res_ctx, dc->res_pool,
del_pipe->stream_res.hpo_dp_stream_enc,
false);
}
#endif
if (del_pipe->stream_res.audio)
update_audio_usage(
&new_ctx->res_ctx,
@@ -2051,6 +2121,31 @@ enum dc_status resource_map_pool_resources(
pipe_ctx->stream_res.stream_enc,
true);
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Allocate DP HPO Stream Encoder based on signal, hw capabilities
* and link settings
*/
if (dc_is_dp_signal(stream->signal) &&
dc->caps.dp_hpo) {
struct dc_link_settings link_settings = {0};
decide_link_settings(stream, &link_settings);
if (dp_get_link_encoding_format(&link_settings) == DP_128b_132b_ENCODING) {
pipe_ctx->stream_res.hpo_dp_stream_enc =
find_first_free_match_hpo_dp_stream_enc_for_link(
&context->res_ctx, pool, stream);
if (!pipe_ctx->stream_res.hpo_dp_stream_enc)
return DC_NO_STREAM_ENC_RESOURCE;
update_hpo_dp_stream_engine_usage(
&context->res_ctx, pool,
pipe_ctx->stream_res.hpo_dp_stream_enc,
true);
}
}
#endif
/* TODO: Add check if ASIC support and EDID audio */
if (!stream->converter_disable_audio &&
dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
@@ -2147,7 +2242,7 @@ enum dc_status dc_validate_global_state(
* Update link encoder to stream assignment.
* TODO: Split out reason allocation from validation.
*/
if (dc->res_pool->funcs->link_encs_assign && fast_validate == false)
dc->res_pool->funcs->link_encs_assign(
dc, new_ctx, new_ctx->streams, new_ctx->stream_count);
#endif
@@ -2726,9 +2821,24 @@ bool pipe_need_reprogram(
if (pipe_ctx_old->stream_res.dsc != pipe_ctx->stream_res.dsc)
return true;
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (pipe_ctx_old->stream_res.hpo_dp_stream_enc != pipe_ctx->stream_res.hpo_dp_stream_enc)
return true;
#endif
/* DIG link encoder resource assignment for stream changed. */
if (pipe_ctx_old->stream->ctx->dc->res_pool->funcs->link_encs_assign) {
bool need_reprogram = false;
struct dc *dc = pipe_ctx_old->stream->ctx->dc;
enum link_enc_cfg_mode mode = dc->current_state->res_ctx.link_enc_cfg_ctx.mode;
dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
if (link_enc_cfg_get_link_enc_used_by_stream(dc, pipe_ctx_old->stream) != pipe_ctx->stream->link_enc)
need_reprogram = true;
dc->current_state->res_ctx.link_enc_cfg_ctx.mode = mode;
return need_reprogram;
}
return false;
}
@@ -2871,7 +2981,8 @@ enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
res = DC_FAIL_CONTROLLER_VALIDATE;
if (res == DC_OK) {
if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
!link->link_enc->funcs->validate_output_with_stream(
link->link_enc, stream))
res = DC_FAIL_ENC_VALIDATE;
}
@@ -2975,3 +3086,22 @@ void get_audio_check(struct audio_info *aud_modes,
}
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct hpo_dp_link_encoder *resource_get_unused_hpo_dp_link_encoder(
const struct resource_pool *pool)
{
uint8_t i;
struct hpo_dp_link_encoder *enc = NULL;
ASSERT(pool->hpo_dp_link_enc_count <= MAX_HPO_DP2_LINK_ENCODERS);
for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
if (pool->hpo_dp_link_enc[i]->transmitter == TRANSMITTER_UNKNOWN) {
enc = pool->hpo_dp_link_enc[i];
break;
}
}
return enc;
}
#endif


@@ -45,7 +45,7 @@
/* forward declaration */
struct aux_payload;
#define DC_VER "3.2.154"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -183,6 +183,9 @@ struct dc_caps {
unsigned int cursor_cache_size;
struct dc_plane_cap planes[MAX_PLANES];
struct dc_color_caps color;
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool dp_hpo;
#endif
bool vbios_lttpr_aware;
bool vbios_lttpr_enable;
};
@@ -289,7 +292,15 @@ struct dc_cap_funcs {
struct link_training_settings;
#if defined(CONFIG_DRM_AMD_DC_DCN)
union allow_lttpr_non_transparent_mode {
struct {
bool DP1_4A : 1;
bool DP2_0 : 1;
} bits;
unsigned char raw;
};
#endif
/* Structure to hold configuration flags set by dm at dc creation. */
struct dc_config {
bool gpu_vm_support;
@@ -302,7 +313,11 @@ struct dc_config {
bool edp_no_power_sequencing;
bool force_enum_edp;
bool forced_clocks;
#if defined(CONFIG_DRM_AMD_DC_DCN)
union allow_lttpr_non_transparent_mode allow_lttpr_non_transparent_mode;
#else
bool allow_lttpr_non_transparent_mode;
#endif
bool multi_mon_pp_mclk_switch;
bool disable_dmcu;
bool enable_4to1MPC;
@@ -456,6 +471,8 @@ union mem_low_power_enable_options {
bool cm: 1;
bool mpc: 1;
bool optc: 1;
bool vpg: 1;
bool afmt: 1;
} bits;
uint32_t u32All;
};
@@ -614,16 +631,19 @@ struct dc_debug_options {
bool enable_dmcub_surface_flip;
bool usbc_combo_phy_reset_wa;
bool enable_dram_clock_change_one_display_vactive;
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* TODO - remove once tested */
bool legacy_dp2_lt;
#endif
union mem_low_power_enable_options enable_mem_low_power;
bool force_vblank_alignment;
/* Enable dmub aux for legacy ddc */
bool enable_dmub_aux_for_legacy_ddc;
bool optimize_edp_link_rate; /* eDP ILR */
/* force enable edp FEC */
bool force_enable_edp_fec;
/* FEC/PSR1 sequence enable delay in 100us */
uint8_t fec_enable_delay_in100us;
bool enable_driver_sequence_debug;
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool disable_z10;
bool enable_sw_cntl_psr;
@@ -1146,6 +1166,12 @@ struct dpcd_caps {
struct dc_lttpr_caps lttpr_caps;
struct psr_caps psr_caps;
#if defined(CONFIG_DRM_AMD_DC_DCN)
union dp_128b_132b_supported_link_rates dp_128b_132b_supported_link_rates;
union dp_main_line_channel_coding_cap channel_coding_cap;
union dp_sink_video_fallback_formats fallback_formats;
union dp_fec_capability1 fec_cap1;
#endif
};
union dpcd_sink_ext_caps {
@@ -1337,7 +1363,7 @@ void dc_hardware_release(struct dc *dc);
bool dc_set_psr_allow_active(struct dc *dc, bool enable);
#if defined(CONFIG_DRM_AMD_DC_DCN)
void dc_z10_restore(const struct dc *dc);
void dc_z10_save_init(struct dc *dc);
#endif


@@ -53,7 +53,17 @@ enum dc_link_rate {
LINK_RATE_RBR2 = 0x0C, // Rate_5 (RBR2)- 3.24 Gbps/Lane
LINK_RATE_RATE_6 = 0x10, // Rate_6 - 4.32 Gbps/Lane
LINK_RATE_HIGH2 = 0x14, // Rate_7 (HBR2)- 5.40 Gbps/Lane
#if defined(CONFIG_DRM_AMD_DC_DCN)
LINK_RATE_HIGH3 = 0x1E, // Rate_8 (HBR3)- 8.10 Gbps/Lane
/* Starting from DP 2.0, the link rate enum directly represents the
* actual link rate value in units of 10 Mbps
*/
LINK_RATE_UHBR10 = 1000, // UHBR10 - 10.0 Gbps/Lane
LINK_RATE_UHBR13_5 = 1350, // UHBR13.5 - 13.5 Gbps/Lane
LINK_RATE_UHBR20 = 2000, // UHBR20 - 20.0 Gbps/Lane
#else
LINK_RATE_HIGH3 = 0x1E // Rate_8 (HBR3)- 8.10 Gbps/Lane
#endif
};
enum dc_link_spread {
@@ -90,17 +100,47 @@ enum dc_post_cursor2 {
POST_CURSOR2_MAX_LEVEL = POST_CURSOR2_LEVEL3,
};
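Because the DP 2.0 members of dc_link_rate above carry the rate directly in units of 10 Mbps while the legacy members are BW-code style register values, converting an enum value to bandwidth has to treat the two ranges differently. A sketch of that conversion (the 0.27 Gbps-per-step rule for legacy codes is the standard DP BW-code convention; treat the >= 1000 threshold as an assumption of this sketch):

#include <stdio.h>

/* Convert a dc_link_rate-style value to Mbps per lane.
 * Legacy 8b/10b codes (small values) are DP BW codes in 0.27 Gbps
 * steps; DP 2.0 values (>= 1000 here) are already in 10 Mbps units.
 */
static long link_rate_to_mbps(int rate)
{
	if (rate >= 1000)        /* LINK_RATE_UHBR10 and above */
		return (long)rate * 10;
	return (long)rate * 270; /* e.g. 0x1E (HBR3) -> 8100 Mbps */
}

int main(void)
{
	printf("%ld %ld %ld\n",
	       link_rate_to_mbps(0x1E),  /* 8100  */
	       link_rate_to_mbps(1000),  /* 10000 */
	       link_rate_to_mbps(1350)); /* 13500 */
	return 0;
}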
#if defined(CONFIG_DRM_AMD_DC_DCN)
enum dc_dp_ffe_preset_level {
DP_FFE_PRESET_LEVEL0 = 0,
DP_FFE_PRESET_LEVEL1,
DP_FFE_PRESET_LEVEL2,
DP_FFE_PRESET_LEVEL3,
DP_FFE_PRESET_LEVEL4,
DP_FFE_PRESET_LEVEL5,
DP_FFE_PRESET_LEVEL6,
DP_FFE_PRESET_LEVEL7,
DP_FFE_PRESET_LEVEL8,
DP_FFE_PRESET_LEVEL9,
DP_FFE_PRESET_LEVEL10,
DP_FFE_PRESET_LEVEL11,
DP_FFE_PRESET_LEVEL12,
DP_FFE_PRESET_LEVEL13,
DP_FFE_PRESET_LEVEL14,
DP_FFE_PRESET_LEVEL15,
DP_FFE_PRESET_MAX_LEVEL = DP_FFE_PRESET_LEVEL15,
};
#endif
enum dc_dp_training_pattern {
DP_TRAINING_PATTERN_SEQUENCE_1 = 0,
DP_TRAINING_PATTERN_SEQUENCE_2,
DP_TRAINING_PATTERN_SEQUENCE_3,
DP_TRAINING_PATTERN_SEQUENCE_4,
DP_TRAINING_PATTERN_VIDEOIDLE,
#if defined(CONFIG_DRM_AMD_DC_DCN)
DP_128b_132b_TPS1,
DP_128b_132b_TPS2,
DP_128b_132b_TPS2_CDS,
#endif
};
enum dp_link_encoding {
DP_UNKNOWN_ENCODING = 0,
DP_8b_10b_ENCODING = 1,
#if defined(CONFIG_DRM_AMD_DC_DCN)
DP_128b_132b_ENCODING = 2,
#endif
};
struct dc_link_settings {
@@ -112,10 +152,26 @@ struct dc_link_settings {
bool dpcd_source_device_specific_field_support;
};
#if defined(CONFIG_DRM_AMD_DC_DCN)
union dc_dp_ffe_preset {
struct {
uint8_t level : 4;
uint8_t reserved : 1;
uint8_t no_preshoot : 1;
uint8_t no_deemphasis : 1;
uint8_t method2 : 1;
} settings;
uint8_t raw;
};
#endif
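The dc_dp_ffe_preset union above overlays a 4-bit preset level and three flag bits on a single DPCD byte, so code can move between the raw register value and its fields without shifting by hand. A standalone sketch (bitfield order within a byte is compiler-dependent, which is why such code only ever round-trips through the same union; the 0x29 value assumes the typical LSB-first layout):

#include <stdint.h>
#include <stdio.h>

union ffe_preset {
	struct {
		uint8_t level        : 4;
		uint8_t reserved     : 1;
		uint8_t no_preshoot  : 1;
		uint8_t no_deemphasis: 1;
		uint8_t method2      : 1;
	} settings;
	uint8_t raw;
};

int main(void)
{
	union ffe_preset p = { .raw = 0 };

	p.settings.level = 9;        /* DP_FFE_PRESET_LEVEL9 */
	p.settings.no_preshoot = 1;

	/* With the usual little-endian LSB-first layout this prints 0x29. */
	printf("raw = 0x%02x, level = %u\n", p.raw, p.settings.level);
	return 0;
}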
struct dc_lane_settings {
enum dc_voltage_swing VOLTAGE_SWING;
enum dc_pre_emphasis PRE_EMPHASIS;
enum dc_post_cursor2 POST_CURSOR2;
#if defined(CONFIG_DRM_AMD_DC_DCN)
union dc_dp_ffe_preset FFE_PRESET;
#endif
};
struct dc_link_training_settings {
@@ -127,6 +183,9 @@ struct dc_link_training_overrides {
enum dc_voltage_swing *voltage_swing;
enum dc_pre_emphasis *pre_emphasis;
enum dc_post_cursor2 *post_cursor2;
#if defined(CONFIG_DRM_AMD_DC_DCN)
union dc_dp_ffe_preset *ffe_preset;
#endif
uint16_t *cr_pattern_time;
uint16_t *eq_pattern_time;
@@ -140,6 +199,16 @@ struct dc_link_training_overrides {
bool *fec_enable;
};
#if defined(CONFIG_DRM_AMD_DC_DCN)
union payload_table_update_status {
struct {
uint8_t VC_PAYLOAD_TABLE_UPDATED:1;
uint8_t ACT_HANDLED:1;
} bits;
uint8_t raw;
};
#endif
union dpcd_rev {
struct {
uint8_t MINOR:4;
@@ -227,7 +296,14 @@ union lane_align_status_updated {
struct {
uint8_t INTERLANE_ALIGN_DONE:1;
uint8_t POST_LT_ADJ_REQ_IN_PROGRESS:1;
#if defined(CONFIG_DRM_AMD_DC_DCN)
uint8_t EQ_INTERLANE_ALIGN_DONE_128b_132b:1;
uint8_t CDS_INTERLANE_ALIGN_DONE_128b_132b:1;
uint8_t LT_FAILED_128b_132b:1;
uint8_t RESERVED:1;
#else
uint8_t RESERVED:4;
#endif
uint8_t DOWNSTREAM_PORT_STATUS_CHANGED:1;
uint8_t LINK_STATUS_UPDATED:1;
} bits;
@@ -240,6 +316,12 @@ union lane_adjust {
uint8_t PRE_EMPHASIS_LANE:2;
uint8_t RESERVED:4;
} bits;
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct {
uint8_t PRESET_VALUE :4;
uint8_t RESERVED :4;
} tx_ffe;
#endif
uint8_t raw;
};
@@ -269,6 +351,12 @@ union dpcd_training_lane {
uint8_t MAX_PRE_EMPHASIS_REACHED:1;
uint8_t RESERVED:2;
} bits;
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct {
uint8_t PRESET_VALUE :4;
uint8_t RESERVED :4;
} tx_ffe;
#endif
uint8_t raw;
};
@@ -551,12 +639,18 @@ union test_response {
union phy_test_pattern {
struct {
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* This field is 7 bits for DP2.0 */
uint8_t PATTERN :7;
uint8_t RESERVED :1;
#else
/* DpcdPhyTestPatterns. This field is 2 bits for DP1.1
* and 3 bits for DP1.2.
*/
uint8_t PATTERN :3;
/* By spec, bit7:2 is 0 for DP1.1. */
uint8_t RESERVED :5;
#endif
} bits;
uint8_t raw;
};
@@ -634,7 +728,14 @@ union dpcd_fec_capability {
uint8_t UNCORRECTED_BLOCK_ERROR_COUNT_CAPABLE:1;
uint8_t CORRECTED_BLOCK_ERROR_COUNT_CAPABLE:1;
uint8_t BIT_ERROR_COUNT_CAPABLE:1;
#if defined(CONFIG_DRM_AMD_DC_DCN)
uint8_t PARITY_BLOCK_ERROR_COUNT_CAPABLE:1;
uint8_t ARITY_BIT_ERROR_COUNT_CAPABLE:1;
uint8_t FEC_RUNNING_INDICATOR_SUPPORTED:1;
uint8_t FEC_ERROR_REPORTING_POLICY_SUPPORTED:1;
#else
uint8_t RESERVED:4;
#endif
} bits;
uint8_t raw;
};
@@ -758,4 +859,125 @@ struct psr_caps {
bool psr_exit_link_training_required;
};
#if defined(CONFIG_DRM_AMD_DC_DCN)
#define DP_MAIN_LINK_CHANNEL_CODING_CAP 0x006
#define DP_SINK_VIDEO_FALLBACK_FORMATS 0x020
#define DP_FEC_CAPABILITY_1 0x091
#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0A3
#define DP_DSC_CONFIGURATION 0x161
#define DP_PHY_SQUARE_PATTERN 0x249
#define DP_128b_132b_SUPPORTED_LINK_RATES 0x2215
#define DP_128b_132b_TRAINING_AUX_RD_INTERVAL 0x2216
#define DP_TEST_264BIT_CUSTOM_PATTERN_7_0 0X2230
#define DP_TEST_264BIT_CUSTOM_PATTERN_263_256 0X2250
#define DP_DSC_SUPPORT_AND_DECODER_COUNT 0x2260
#define DP_DSC_MAX_SLICE_COUNT_AND_AGGREGATION_0 0x2270
# define DP_DSC_DECODER_0_MAXIMUM_SLICE_COUNT_MASK (1 << 0)
# define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_MASK (0b111 << 1)
# define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_SHIFT 1
# define DP_DSC_DECODER_COUNT_MASK (0b111 << 5)
# define DP_DSC_DECODER_COUNT_SHIFT 5
#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
#define DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER 0xF0006
#define DP_PHY_REPEATER_128b_132b_RATES 0xF0007
#define DP_128b_132b_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 0xF0022
#define DP_INTRA_HOP_AUX_REPLY_INDICATION (1 << 3)
/* TODO - Use DRM header to replace above once available */
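The DP_DSC_DECODER_COUNT mask/shift pair above follows the usual register-field convention, so pulling the decoder count out of the raw DPCD byte is a mask-then-shift. A quick standalone illustration:

#include <stdint.h>
#include <stdio.h>

#define DP_DSC_DECODER_COUNT_MASK  (0x7 << 5)
#define DP_DSC_DECODER_COUNT_SHIFT 5

int main(void)
{
	uint8_t raw = 0x47; /* example readback of DPCD 0x2260 */
	unsigned count = (raw & DP_DSC_DECODER_COUNT_MASK)
			 >> DP_DSC_DECODER_COUNT_SHIFT;

	printf("DSC decoders: %u\n", count); /* prints 2 */
	return 0;
}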
union dp_main_line_channel_coding_cap {
struct {
uint8_t DP_8b_10b_SUPPORTED :1;
uint8_t DP_128b_132b_SUPPORTED :1;
uint8_t RESERVED :6;
} bits;
uint8_t raw;
};
union dp_main_link_channel_coding_lttpr_cap {
struct {
uint8_t DP_128b_132b_SUPPORTED :1;
uint8_t RESERVED :7;
} bits;
uint8_t raw;
};
union dp_128b_132b_supported_link_rates {
struct {
uint8_t UHBR10 :1;
uint8_t UHBR20 :1;
uint8_t UHBR13_5:1;
uint8_t RESERVED:5;
} bits;
uint8_t raw;
};
union dp_128b_132b_supported_lttpr_link_rates {
struct {
uint8_t UHBR10 :1;
uint8_t UHBR13_5:1;
uint8_t UHBR20 :1;
uint8_t RESERVED:5;
} bits;
uint8_t raw;
};
union dp_sink_video_fallback_formats {
struct {
uint8_t dp_1024x768_60Hz_24bpp_support :1;
uint8_t dp_1280x720_60Hz_24bpp_support :1;
uint8_t dp_1920x1080_60Hz_24bpp_support :1;
uint8_t RESERVED :5;
} bits;
uint8_t raw;
};
union dp_fec_capability1 {
struct {
uint8_t AGGREGATED_ERROR_COUNTERS_CAPABLE :1;
uint8_t RESERVED :7;
} bits;
uint8_t raw;
};
struct dp_color_depth_caps {
uint8_t support_6bpc :1;
uint8_t support_8bpc :1;
uint8_t support_10bpc :1;
uint8_t support_12bpc :1;
uint8_t support_16bpc :1;
uint8_t RESERVED :3;
};
struct dp_encoding_format_caps {
uint8_t support_rgb :1;
uint8_t support_ycbcr444:1;
uint8_t support_ycbcr422:1;
uint8_t support_ycbcr420:1;
uint8_t RESERVED :4;
};
union dp_dfp_cap_ext {
struct {
uint8_t supported;
uint8_t max_pixel_rate_in_mps[2];
uint8_t max_video_h_active_width[2];
uint8_t max_video_v_active_height[2];
struct dp_encoding_format_caps encoding_format_caps;
struct dp_color_depth_caps rgb_color_depth_caps;
struct dp_color_depth_caps ycbcr444_color_depth_caps;
struct dp_color_depth_caps ycbcr422_color_depth_caps;
struct dp_color_depth_caps ycbcr420_color_depth_caps;
} fields;
uint8_t raw[12];
};
union dp_128b_132b_training_aux_rd_interval {
struct {
uint8_t VALUE :7;
uint8_t UNIT :1;
} bits;
uint8_t raw;
};
#endif
#endif /* DC_DP_TYPES_H */


@@ -51,7 +51,6 @@ struct dc_dsc_policy {
int min_slice_height; // Must not be less than 8
uint32_t max_target_bpp;
uint32_t min_target_bpp;
bool enable_dsc_when_not_needed;
};
@@ -81,6 +80,16 @@ bool dc_dsc_compute_config(
uint32_t dc_dsc_stream_bandwidth_in_kbps(const struct dc_crtc_timing *timing,
uint32_t bpp_x16, uint32_t num_slices_h, bool is_dp);
uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps(
const struct dc_crtc_timing *timing,
const int num_slices_h,
const bool is_dp);
/* TODO - Hardware/specs limitation should be owned by dc dsc and returned to DM,
* and DM can choose to OVERRIDE the limitation on CASE BY CASE basis.
* Hardware/specs limitation should not be writable by DM.
* It should be decoupled from DM specific policy and named differently.
*/
void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
uint32_t max_target_bpp_limit_override_x16,
struct dc_dsc_policy *policy);
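The bpp_x16 parameter taken by dc_dsc_stream_bandwidth_in_kbps() is the usual DSC fixed-point convention: target bits-per-pixel scaled by 16 so fractional rates stay integral. A small sketch of moving between the two representations (the formula is the generic pixel-rate-times-bpp calculation, not the driver's exact overhead math):

#include <stdio.h>

int main(void)
{
	unsigned pix_clk_khz = 594000; /* 4k60 RGB pixel rate */
	unsigned bpp_x16 = 200;        /* 12.5 bpp, scaled by 16 */

	/* Payload bandwidth in kbps, before any DSC/transport overhead. */
	unsigned long long bw_kbps =
		(unsigned long long)pix_clk_khz * bpp_x16 / 16;

	printf("%u.%04u bpp -> %llu kbps\n",
	       bpp_x16 / 16, (bpp_x16 % 16) * 625, bw_kbps);
	return 0;
}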


@@ -45,6 +45,10 @@ struct dc_link_status {
struct link_mst_stream_allocation {
/* DIG front */
const struct stream_encoder *stream_enc;
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* HPO DP Stream Encoder */
const struct hpo_dp_stream_encoder *hpo_dp_stream_enc;
#endif
/* associate DRM payload table with DC stream encoder */
uint8_t vcp_id;
/* number of slots required for the DP stream in transport packet */
@@ -150,6 +154,9 @@ struct dc_link {
struct panel_cntl *panel_cntl;
struct link_encoder *link_enc;
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct hpo_dp_link_encoder *hpo_dp_link_enc;
#endif
struct graphics_object_id link_id;
/* Endpoint type distinguishes display endpoints which do not have entries
* in the BIOS connector table from those that do. Helps when tracking link
@@ -296,7 +303,8 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx);
* false - no change in Downstream port status. No further action required
* from DM. */
bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss,
bool defer_handling, bool *has_left_work);
/*
* On eDP links this function call will stall until T12 has elapsed.
@@ -305,9 +313,9 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
*/
bool dc_link_wait_for_t12(struct dc_link *link);
void dc_link_dp_handle_automated_test(struct dc_link *link);
void dc_link_dp_handle_link_loss(struct dc_link *link);
bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link);
struct dc_sink_init_data;
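The reworked dc_link_handle_hpd_rx_irq() signature lets the DM split short-pulse handling in two: the immediate call can defer the heavy work (link-loss retraining, automated-test handling) and report back through has_left_work, and the new dc_link_dp_handle_* entry points are then invoked later from the DM's own context. A hedged sketch of that calling pattern (control flow only; queue_deferred_work() and the exact irq_data field names are assumptions, and the surrounding work-queue plumbing is left out):

/* Sketch: called from the short-pulse interrupt path. */
static void handle_hpd_rx_short_pulse(struct dc_link *link)
{
	union hpd_irq_data irq_data;
	bool link_loss = false;
	bool has_left_work = false;

	/* defer_handling = true: only read/ack DPCD IRQ state now. */
	dc_link_handle_hpd_rx_irq(link, &irq_data, &link_loss,
				  true, &has_left_work);

	if (has_left_work)
		queue_deferred_work(link, &irq_data, link_loss); /* assumed helper */
}

/* Sketch: runs later from process context (e.g. a work queue). */
static void deferred_hpd_rx_work(struct dc_link *link,
				 union hpd_irq_data *irq_data, bool link_loss)
{
	if (irq_data->bytes.device_service_irq.bits.AUTOMATED_TEST)
		dc_link_dp_handle_automated_test(link);
	else if (link_loss && dc_link_dp_allow_hpd_rx_irq(link))
		dc_link_dp_handle_link_loss(link);
}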


@@ -395,9 +395,27 @@ struct dc_lttpr_caps {
uint8_t max_link_rate;
uint8_t phy_repeater_cnt;
uint8_t max_ext_timeout;
#if defined(CONFIG_DRM_AMD_DC_DCN)
union dp_main_link_channel_coding_lttpr_cap main_link_channel_coding;
union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates;
#endif
uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1];
};
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dc_dongle_dfp_cap_ext {
bool supported;
uint16_t max_pixel_rate_in_mps;
uint16_t max_video_h_active_width;
uint16_t max_video_v_active_height;
struct dp_encoding_format_caps encoding_format_caps;
struct dp_color_depth_caps rgb_color_depth_caps;
struct dp_color_depth_caps ycbcr444_color_depth_caps;
struct dp_color_depth_caps ycbcr422_color_depth_caps;
struct dp_color_depth_caps ycbcr420_color_depth_caps;
};
#endif
struct dc_dongle_caps {
/* dongle type (DP converter, CV smart dongle) */
enum display_dongle_type dongle_type;
@@ -411,6 +429,9 @@ struct dc_dongle_caps {
bool is_dp_hdmi_ycbcr420_converter;
uint32_t dp_hdmi_max_bpc;
uint32_t dp_hdmi_max_pixel_clk_in_khz;
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dc_dongle_dfp_cap_ext dfp_cap_ext;
#endif
};
/* Scaling format */
enum scaling_transformation {


@@ -96,6 +96,22 @@
SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \
NBIO_SR(BIOS_SCRATCH_2)
#define ABM_DCN302_REG_LIST(id)\
ABM_COMMON_REG_LIST_DCE_BASE(), \
SRI(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \
SRI(DC_ABM1_LS_SAMPLE_RATE, ABM, id), \
SRI(BL1_PWM_BL_UPDATE_SAMPLE_RATE, ABM, id), \
SRI(DC_ABM1_HG_MISC_CTRL, ABM, id), \
SRI(DC_ABM1_IPCSC_COEFF_SEL, ABM, id), \
SRI(BL1_PWM_CURRENT_ABM_LEVEL, ABM, id), \
SRI(BL1_PWM_TARGET_ABM_LEVEL, ABM, id), \
SRI(BL1_PWM_USER_LEVEL, ABM, id), \
SRI(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM, id), \
SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \
SRI(DC_ABM1_ACE_OFFSET_SLOPE_0, ABM, id), \
SRI(DC_ABM1_ACE_THRES_12, ABM, id), \
NBIO_SR(BIOS_SCRATCH_2)
#define ABM_DCN30_REG_LIST(id)\
ABM_COMMON_REG_LIST_DCE_BASE(), \
SRI(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \


@@ -42,7 +42,7 @@
#define DC_LOGGER \
engine->ctx->logger
#define DC_TRACE_LEVEL_MESSAGE(...) do { } while (0)
#define IS_DC_I2CAUX_LOGGING_ENABLED() (false)
#define LOG_FLAG_Error_I2cAux LOG_ERROR
#define LOG_FLAG_I2cAux_DceAux LOG_I2C_AUX
@@ -76,7 +76,7 @@ enum {
#define DEFAULT_AUX_ENGINE_MULT 0
#define DEFAULT_AUX_ENGINE_LENGTH 69
#define DC_TRACE_LEVEL_MESSAGE(...) do { } while (0)
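Replacing the empty comment expansion with do { } while (0) is the standard fix for statement-like macros: an expansion that vanishes entirely leaves a bare `;` as the if-body, which compilers flag with -Wempty-body and which reads like a bug, while the do/while form is a real (empty) statement that consumes the trailing semicolon cleanly. A minimal demonstration:

#include <stdio.h>

#define TRACE_EMPTY(...)                 /* expands to nothing */
#define TRACE_SAFE(...) do { } while (0) /* one well-formed statement */

int main(void)
{
	int on = 0;

	/* With TRACE_EMPTY this would leave `if (on) ;` (an empty body
	 * warning); TRACE_SAFE always behaves as exactly one statement,
	 * so if/else nesting stays unambiguous.
	 */
	if (on)
		TRACE_SAFE("enabled\n");
	else
		printf("disabled\n");
	return 0;
}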
static void release_engine(
struct dce_aux *engine)
@@ -689,8 +689,8 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
enum aux_return_code_type operation_result;
bool retry_on_defer = false;
struct ddc *ddc_pin = ddc->ddc_pin;
struct dce_aux *aux_engine = NULL;
struct aux_engine_dce110 *aux110 = NULL;
uint32_t defer_time_in_ms = 0;
int aux_ack_retries = 0,
@@ -699,6 +699,11 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
aux_timeout_retries = 0,
aux_invalid_reply_retries = 0;
if (ddc_pin) {
aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
aux110 = FROM_AUX_ENGINE(aux_engine);
}
if (!payload->reply) {
payload_reply = false;
payload->reply = &reply;
@@ -765,7 +770,8 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
"dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_AUX_DEFER");
/* polling_timeout_period is in us */
if (aux110)
defer_time_in_ms += aux110->polling_timeout_period / 1000;
++aux_defer_retries;
fallthrough;
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:


@@ -919,6 +919,7 @@ static void dce110_stream_encoder_stop_dp_info_packets(
}
static void dce110_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc)
{
struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
@@ -967,6 +968,7 @@ static void dce110_stream_encoder_dp_blank(
/* output video stream to link encoder */
static void dce110_stream_encoder_dp_unblank(
struct dc_link *link,
struct stream_encoder *enc,
const struct encoder_unblank_param *param)
{


@@ -46,6 +46,7 @@
#include "transform.h"
#include "stream_encoder.h"
#include "link_encoder.h"
#include "link_enc_cfg.h"
#include "link_hwss.h"
#include "dc_link_dp.h"
#if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -57,7 +58,8 @@
#include "audio.h"
#include "reg_helper.h"
#include "panel_cntl.h"
#include "inc/link_dpcd.h"
#include "dpcd_defs.h"
/* include DCE11 register header files */
#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_sh_mask.h"
@@ -1108,11 +1110,23 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
clk_mgr->funcs->enable_pme_wa(clk_mgr);
/* un-mute audio */
/* TODO: audio should be per stream rather than per link */
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (is_dp_128b_132b_signal(pipe_ctx))
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.hpo_dp_stream_enc, false);
else
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, false);
#else
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, false);
#endif
if (pipe_ctx->stream_res.audio)
pipe_ctx->stream_res.audio->enabled = true;
}
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_ENABLE_AUDIO_STREAM);
}
void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)
@@ -1129,14 +1143,32 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == false)
return;
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (is_dp_128b_132b_signal(pipe_ctx))
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.hpo_dp_stream_enc, true);
else
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, true);
#else
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, true);
#endif
if (pipe_ctx->stream_res.audio) {
pipe_ctx->stream_res.audio->enabled = false;
if (dc_is_dp_signal(pipe_ctx->stream->signal))
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (is_dp_128b_132b_signal(pipe_ctx))
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_disable(
pipe_ctx->stream_res.hpo_dp_stream_enc);
else
pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable(
pipe_ctx->stream_res.stream_enc);
#else
pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable(
pipe_ctx->stream_res.stream_enc);
#endif
else
pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_disable(
pipe_ctx->stream_res.stream_enc);
@@ -1151,6 +1183,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)
* stream->stream_engine_id);
*/
}
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_DISABLE_AUDIO_STREAM);
} }
void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
@@ -1158,6 +1193,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
struct dc *dc = pipe_ctx->stream->ctx->dc;
struct link_encoder *link_enc = NULL;
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) {
pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
@@ -1166,17 +1202,48 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc);
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (is_dp_128b_132b_signal(pipe_ctx)) {
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->stop_dp_info_packets(
pipe_ctx->stream_res.hpo_dp_stream_enc);
} else if (dc_is_dp_signal(pipe_ctx->stream->signal))
#else
if (dc_is_dp_signal(pipe_ctx->stream->signal)) if (dc_is_dp_signal(pipe_ctx->stream->signal))
#endif
pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets( pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
pipe_ctx->stream_res.stream_enc); pipe_ctx->stream_res.stream_enc);
dc->hwss.disable_audio_stream(pipe_ctx); dc->hwss.disable_audio_stream(pipe_ctx);
link->link_enc->funcs->connect_dig_be_to_fe( /* Link encoder may have been dynamically assigned to non-physical display endpoint. */
if (link->ep_type == DISPLAY_ENDPOINT_PHY)
link_enc = link->link_enc;
else if (dc->res_pool->funcs->link_encs_assign)
link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
ASSERT(link_enc);
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (is_dp_128b_132b_signal(pipe_ctx)) {
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->disable(
pipe_ctx->stream_res.hpo_dp_stream_enc);
setup_dp_hpo_stream(pipe_ctx, false);
/* TODO - DP2.0 HW: unmap stream from link encoder here */
} else {
if (link_enc)
link_enc->funcs->connect_dig_be_to_fe(
link_enc,
pipe_ctx->stream_res.stream_enc->id,
false);
}
#else
if (link_enc)
link_enc->funcs->connect_dig_be_to_fe(
link->link_enc, link->link_enc,
pipe_ctx->stream_res.stream_enc->id, pipe_ctx->stream_res.stream_enc->id,
false); false);
#endif
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE);
} }
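
The teardown above no longer assumes link->link_enc is valid: the encoder may have been dynamically assigned to a non-physical display endpoint, so it is resolved first and every use is NULL-guarded. A minimal sketch of that resolve-then-guard shape, with invented names throughout:

#include <stddef.h>
#include <stdio.h>

enum ep_type { EP_PHY, EP_VIRTUAL };

struct link_enc { int inst; };

struct link_sketch {
	enum ep_type ep_type;
	struct link_enc *fixed_enc;                /* valid only for EP_PHY */
	struct link_enc *(*lookup_assigned)(void); /* dynamic assignment table */
};

static struct link_enc *resolve_link_enc(struct link_sketch *link)
{
	if (link->ep_type == EP_PHY)
		return link->fixed_enc;
	if (link->lookup_assigned)
		return link->lookup_assigned();
	return NULL;
}

static struct link_enc *no_assignment(void) { return NULL; }

int main(void)
{
	struct link_enc phy = { 0 };
	struct link_sketch a = { EP_PHY, &phy, NULL };
	struct link_sketch b = { EP_VIRTUAL, NULL, no_assignment };

	/* Callers must tolerate NULL: the encoder may be unassigned mid-teardown. */
	printf("a: %p\nb: %p\n", (void *)resolve_link_enc(&a),
	       (void *)resolve_link_enc(&b));
	return 0;
}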
@@ -1192,7 +1259,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
 	params.link_settings.link_rate = link_settings->link_rate;
 
 	if (dc_is_dp_signal(pipe_ctx->stream->signal))
-		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
+		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
 
 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
 		hws->funcs.edp_backlight_control(link, true);
@@ -1210,8 +1277,16 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
 		link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
 	}
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (is_dp_128b_132b_signal(pipe_ctx)) {
+		/* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
+		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_blank(
+				pipe_ctx->stream_res.hpo_dp_stream_enc);
+	} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
+#else
 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
-		pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
+#endif
+		pipe_ctx->stream_res.stream_enc->funcs->dp_blank(link, pipe_ctx->stream_res.stream_enc);
 
 		if (!dc_is_embedded_signal(pipe_ctx->stream->signal)) {
 			/*
@@ -1436,6 +1511,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
 		struct dc *dc)
 {
 	struct dc_stream_state *stream = pipe_ctx->stream;
+	struct dc_link *link = stream->link;
 	struct drr_params params = {0};
 	unsigned int event_triggers = 0;
 	struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
@@ -1451,10 +1527,23 @@ static enum dc_status apply_single_controller_ctx_to_hw(
 		build_audio_output(context, pipe_ctx, &audio_output);
 
 		if (dc_is_dp_signal(pipe_ctx->stream->signal))
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+			if (is_dp_128b_132b_signal(pipe_ctx))
+				pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_setup(
+						pipe_ctx->stream_res.hpo_dp_stream_enc,
+						pipe_ctx->stream_res.audio->inst,
+						&pipe_ctx->stream->audio_info);
+			else
+				pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup(
+						pipe_ctx->stream_res.stream_enc,
+						pipe_ctx->stream_res.audio->inst,
+						&pipe_ctx->stream->audio_info);
+#else
 			pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup(
 					pipe_ctx->stream_res.stream_enc,
 					pipe_ctx->stream_res.audio->inst,
 					&pipe_ctx->stream->audio_info);
+#endif
 		else
 			pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup(
 					pipe_ctx->stream_res.stream_enc,
@@ -1469,10 +1558,18 @@ static enum dc_status apply_single_controller_ctx_to_hw(
 					&pipe_ctx->stream->audio_info);
 	}
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	/* DCN3.1 FPGA Workaround
+	 * Need to enable HPO DP Stream Encoder before setting OTG master enable.
+	 * To do so, move calling function enable_stream_timing to only be done AFTER calling
+	 * function core_link_enable_stream
+	 */
+	if (!(hws->wa.dp_hpo_and_otg_sequence && is_dp_128b_132b_signal(pipe_ctx)))
+#endif
 	/* */
 	/* Do not touch stream timing on seamless boot optimization. */
 	if (!pipe_ctx->stream->apply_seamless_boot_optimization)
 		hws->funcs.enable_stream_timing(pipe_ctx, context, dc);
 
 	if (hws->funcs.setup_vupdate_interrupt)
 		hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
@@ -1499,6 +1596,9 @@ static enum dc_status apply_single_controller_ctx_to_hw(
 			pipe_ctx->stream_res.stream_enc,
 			pipe_ctx->stream_res.tg->inst);
 
+	if (dc_is_dp_signal(pipe_ctx->stream->signal))
+		dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
+
 	pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
 		pipe_ctx->stream_res.opp,
 		COLOR_SPACE_YCBCR601,
@@ -1526,6 +1626,18 @@ static enum dc_status apply_single_controller_ctx_to_hw(
 	if (!stream->dpms_off)
 		core_link_enable_stream(context, pipe_ctx);
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	/* DCN3.1 FPGA Workaround
+	 * Need to enable HPO DP Stream Encoder before setting OTG master enable.
+	 * To do so, move calling function enable_stream_timing to only be done AFTER calling
+	 * function core_link_enable_stream
+	 */
+	if (hws->wa.dp_hpo_and_otg_sequence && is_dp_128b_132b_signal(pipe_ctx)) {
+		if (!pipe_ctx->stream->apply_seamless_boot_optimization)
+			hws->funcs.enable_stream_timing(pipe_ctx, context, dc);
+	}
+#endif
+
 	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
 
 	pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false;
@@ -1537,29 +1649,37 @@ static enum dc_status apply_single_controller_ctx_to_hw(
 static void power_down_encoders(struct dc *dc)
 {
-	int i;
-
-	/* do not know BIOS back-front mapping, simply blank all. It will not
-	 * hurt for non-DP
-	 */
-	for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
-		dc->res_pool->stream_enc[i]->funcs->dp_blank(
-					dc->res_pool->stream_enc[i]);
-	}
+	int i, j;
 
 	for (i = 0; i < dc->link_count; i++) {
 		enum signal_type signal = dc->links[i]->connector_signal;
 
 		if ((signal == SIGNAL_TYPE_EDP) ||
-			(signal == SIGNAL_TYPE_DISPLAY_PORT))
+			(signal == SIGNAL_TYPE_DISPLAY_PORT)) {
+			if (dc->links[i]->link_enc->funcs->get_dig_frontend &&
+				dc->links[i]->link_enc->funcs->is_dig_enabled(dc->links[i]->link_enc)) {
+				unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(
+							dc->links[i]->link_enc);
+
+				for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+					if (fe == dc->res_pool->stream_enc[j]->id) {
+						dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
+									dc->res_pool->stream_enc[j]);
+						break;
+					}
+				}
+			}
+
 			if (!dc->links[i]->wa_flags.dp_keep_receiver_powered)
 				dp_receiver_power_ctrl(dc->links[i], false);
+		}
 
 		if (signal != SIGNAL_TYPE_EDP)
 			signal = SIGNAL_TYPE_NONE;
 
-		dc->links[i]->link_enc->funcs->disable_output(
-				dc->links[i]->link_enc, signal);
+		if (dc->links[i]->ep_type == DISPLAY_ENDPOINT_PHY)
+			dc->links[i]->link_enc->funcs->disable_output(
+					dc->links[i]->link_enc, signal);
 
 		dc->links[i]->link_status.link_active = false;
 		memset(&dc->links[i]->cur_link_settings, 0,
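
The power_down_encoders() rewrite above stops blanket-blanking every stream encoder and instead blanks only the one whose id matches the DIG frontend reported by each enabled link encoder. A toy version of that id-matching loop, using invented data rather than the DC structs:

#include <stdio.h>

#define N_ENCS 4

struct stream_enc_sketch { int id; };

/* Blank only the stream encoder wired to the given DIG frontend id. */
static void blank_matching_enc(struct stream_enc_sketch *encs, int n, int fe)
{
	int j;

	for (j = 0; j < n; j++) {
		if (encs[j].id == fe) {
			printf("blanking stream encoder %d\n", encs[j].id);
			break; /* ids are unique; stop at the first match */
		}
	}
}

int main(void)
{
	struct stream_enc_sketch encs[N_ENCS] = { {10}, {11}, {12}, {13} };

	blank_matching_enc(encs, N_ENCS, 12);
	return 0;
}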

View File

@@ -466,6 +466,71 @@ void dcn10_log_hw_state(struct dc *dc,
 
 	log_mpc_crc(dc, log_ctx);
 
+	{
+		int hpo_dp_link_enc_count = 0;
+
+		if (pool->hpo_dp_stream_enc_count > 0) {
+			DTN_INFO("DP HPO S_ENC: Enabled OTG Format Depth Vid SDP Compressed Link\n");
+
+			for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
+				struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
+				struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];
+
+				if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
+					hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);
+
+					DTN_INFO("[%d]: %d %d %6s %d %d %d %d %d\n",
+							hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
+							hpo_dp_se_state.stream_enc_enabled,
+							hpo_dp_se_state.otg_inst,
+							(hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
+									((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
+									(hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
+							(hpo_dp_se_state.component_depth == 0) ? 6 :
+									((hpo_dp_se_state.component_depth == 1) ? 8 :
+									(hpo_dp_se_state.component_depth == 2) ? 10 : 12),
+							hpo_dp_se_state.vid_stream_enabled,
+							hpo_dp_se_state.sdp_enabled,
+							hpo_dp_se_state.compressed_format,
+							hpo_dp_se_state.mapped_to_link_enc);
+				}
+			}
+
+			DTN_INFO("\n");
+		}
+
+		/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
+		for (i = 0; i < dc->link_count; i++)
+			if (dc->links[i]->hpo_dp_link_enc)
+				hpo_dp_link_enc_count++;
+
+		if (hpo_dp_link_enc_count) {
+			DTN_INFO("DP HPO L_ENC: Enabled Mode Lanes Stream Slots VC Rate X VC Rate Y\n");
+
+			for (i = 0; i < dc->link_count; i++) {
+				struct hpo_dp_link_encoder *hpo_dp_link_enc = dc->links[i]->hpo_dp_link_enc;
+				struct hpo_dp_link_enc_state hpo_dp_le_state = {0};
+
+				if (hpo_dp_link_enc && hpo_dp_link_enc->funcs->read_state) {
+					hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
+					DTN_INFO("[%d]: %d %6s %d %d %d %d %d\n",
+							hpo_dp_link_enc->inst,
+							hpo_dp_le_state.link_enc_enabled,
+							(hpo_dp_le_state.link_mode == 0) ? "TPS1" :
+									(hpo_dp_le_state.link_mode == 1) ? "TPS2" :
+									(hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
+							hpo_dp_le_state.lane_count,
+							hpo_dp_le_state.stream_src[0],
+							hpo_dp_le_state.slot_count[0],
+							hpo_dp_le_state.vc_rate_x[0],
+							hpo_dp_le_state.vc_rate_y[0]);
+					DTN_INFO("\n");
+				}
+			}
+
+			DTN_INFO("\n");
+		}
+	}
+
 	DTN_INFO_END();
 }
@@ -1424,7 +1489,7 @@ void dcn10_init_hw(struct dc *dc)
 			for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
 				if (fe == dc->res_pool->stream_enc[j]->id) {
-					dc->res_pool->stream_enc[j]->funcs->dp_blank(
+					dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
 								dc->res_pool->stream_enc[j]);
 					break;
 				}
@@ -1522,7 +1587,7 @@ void dcn10_power_down_on_boot(struct dc *dc)
 	for (i = 0; i < dc->link_count; i++) {
 		struct dc_link *link = dc->links[i];
 
-		if (link->link_enc->funcs->is_dig_enabled &&
+		if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
 				link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
 				dc->hwss.power_down) {
 			dc->hwss.power_down(dc);
@@ -3176,13 +3241,11 @@ void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
 
 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
 {
-	struct pipe_ctx *test_pipe;
+	struct pipe_ctx *test_pipe, *split_pipe;
 	const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
-	const struct rect *r1 = &scl_data->recout, *r2;
-	int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b;
+	struct rect r1 = scl_data->recout, r2, r2_half;
+	int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
 	int cur_layer = pipe_ctx->plane_state->layer_index;
-	bool upper_pipe_exists = false;
-	struct fixed31_32 one = dc_fixpt_from_int(1);
 
 	/**
 	 * Disable the cursor if there's another pipe above this with a
@@ -3191,26 +3254,33 @@ static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
 	 */
 	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
 	     test_pipe = test_pipe->top_pipe) {
-		if (!test_pipe->plane_state->visible)
+		// Skip invisible layer and pipe-split plane on same layer
+		if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
 			continue;
 
-		r2 = &test_pipe->plane_res.scl_data.recout;
-		r2_r = r2->x + r2->width;
-		r2_b = r2->y + r2->height;
+		r2 = test_pipe->plane_res.scl_data.recout;
+		r2_r = r2.x + r2.width;
+		r2_b = r2.y + r2.height;
+		split_pipe = test_pipe;
 
-		if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b)
+		/**
+		 * There is another half plane on same layer because of
+		 * pipe-split, merge together per same height.
+		 */
+		for (split_pipe = pipe_ctx->top_pipe; split_pipe;
+		     split_pipe = split_pipe->top_pipe)
+			if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
+				r2_half = split_pipe->plane_res.scl_data.recout;
+				r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
+				r2.width = r2.width + r2_half.width;
+				r2_r = r2.x + r2.width;
+				break;
+			}
+
+		if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
 			return true;
-
-		if (test_pipe->plane_state->layer_index < cur_layer)
-			upper_pipe_exists = true;
 	}
 
-	// if plane scaled, assume an upper plane can handle cursor if it exists.
-	if (upper_pipe_exists &&
-			(scl_data->ratios.horz.value != one.value ||
-			scl_data->ratios.vert.value != one.value))
-		return true;
-
 	return false;
 }
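
The reworked cursor check above first merges the two half-recouts that pipe split produces for the same layer, then tests whether this pipe's recout is contained in the merged rectangle. The geometry is easy to verify in isolation; the sketch below uses plain stand-in structs, not the DC rect type:

#include <stdbool.h>
#include <stdio.h>

struct rect_sketch { int x, y, width, height; };

/* Merge two horizontal halves of a split plane into one rectangle. */
static struct rect_sketch merge_halves(struct rect_sketch a, struct rect_sketch b)
{
	struct rect_sketch m = a;

	m.x = (b.x < a.x) ? b.x : a.x;
	m.width = a.width + b.width;
	return m;
}

/* True if r1 lies entirely inside r2. */
static bool contained(struct rect_sketch r1, struct rect_sketch r2)
{
	return r1.x >= r2.x && r1.y >= r2.y &&
	       r1.x + r1.width <= r2.x + r2.width &&
	       r1.y + r1.height <= r2.y + r2.height;
}

int main(void)
{
	struct rect_sketch left  = { 0, 0, 960, 1080 };
	struct rect_sketch right = { 960, 0, 960, 1080 };
	struct rect_sketch vp    = { 800, 100, 300, 600 }; /* straddles the split */

	printf("in left half only: %d\n", contained(vp, left));                       /* 0 */
	printf("in merged plane:   %d\n", contained(vp, merge_halves(left, right)));  /* 1 */
	return 0;
}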
@@ -3613,7 +3683,7 @@ void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
 		if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
 			params.timing.pix_clk_100hz /= 2;
-		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
+		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
 	}
 
 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {

View File

@@ -1460,5 +1460,14 @@ void dcn10_link_encoder_get_max_link_cap(struct link_encoder *enc,
 	if (enc->features.flags.bits.IS_HBR3_CAPABLE)
 		max_link_cap.link_rate = LINK_RATE_HIGH3;
 
+	if (enc->features.flags.bits.IS_UHBR10_CAPABLE)
+		max_link_cap.link_rate = LINK_RATE_UHBR10;
+
+	if (enc->features.flags.bits.IS_UHBR13_5_CAPABLE)
+		max_link_cap.link_rate = LINK_RATE_UHBR13_5;
+
+	if (enc->features.flags.bits.IS_UHBR20_CAPABLE)
+		max_link_cap.link_rate = LINK_RATE_UHBR20;
+
 	*link_settings = max_link_cap;
 }
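
The capability checks above run in ascending rate order, so the highest flag that is set wins. The same shape in a standalone sketch, with made-up rate values standing in for the real LINK_RATE_* constants:

#include <stdio.h>

/* Made-up rates in Mbps per lane; the check order mirrors the hunk above. */
enum rate { HBR3 = 8100, UHBR10 = 10000, UHBR13_5 = 13500, UHBR20 = 20000 };

struct caps { unsigned hbr3:1, uhbr10:1, uhbr13_5:1, uhbr20:1; };

static enum rate max_rate(struct caps c)
{
	enum rate r = HBR3; /* assume a baseline for the sketch */

	/* Ascending order: each set flag overrides the previous pick. */
	if (c.uhbr10)
		r = UHBR10;
	if (c.uhbr13_5)
		r = UHBR13_5;
	if (c.uhbr20)
		r = UHBR20;
	return r;
}

int main(void)
{
	struct caps c = { .hbr3 = 1, .uhbr10 = 1, .uhbr13_5 = 1 };

	printf("max link rate: %d Mbps/lane\n", max_rate(c)); /* 13500 */
	return 0;
}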

View File

@@ -118,6 +118,7 @@ struct dcn10_link_enc_registers {
 	uint32_t RDPCSTX_PHY_CNTL4;
 	uint32_t RDPCSTX_PHY_CNTL5;
 	uint32_t RDPCSTX_PHY_CNTL6;
+	uint32_t RDPCSPIPE_PHY_CNTL6;
 	uint32_t RDPCSTX_PHY_CNTL7;
 	uint32_t RDPCSTX_PHY_CNTL8;
 	uint32_t RDPCSTX_PHY_CNTL9;

View File

@@ -1296,7 +1296,7 @@ struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link(
 		 * in daisy chain use case
 		 */
 		j = i;
-		if (pool->stream_enc[i]->id ==
+		if (link->ep_type == DISPLAY_ENDPOINT_PHY && pool->stream_enc[i]->id ==
 				link->link_enc->preferred_engine)
 			return pool->stream_enc[i];
 	}

View File

@@ -29,6 +29,9 @@
 #include "dcn10_stream_encoder.h"
 #include "reg_helper.h"
 #include "hw_shared.h"
+#include "inc/link_dpcd.h"
+#include "dpcd_defs.h"
+#include "dcn30/dcn30_afmt.h"
 
 #define DC_LOGGER \
 		enc1->base.ctx->logger
@@ -726,6 +729,16 @@ void enc1_stream_encoder_update_dp_info_packets(
 				0, /* packetIndex */
 				&info_frame->vsc);
 
+	/* VSC SDP at packetIndex 1 is used by PSR in DMCUB FW.
+	 * Note that the enablement of GSP1 is not done below,
+	 * it's done in FW.
+	 */
+	if (info_frame->vsc.valid)
+		enc1_update_generic_info_packet(
+				enc1,
+				1, /* packetIndex */
+				&info_frame->vsc);
+
 	if (info_frame->spd.valid)
 		enc1_update_generic_info_packet(
 				enc1,
@@ -884,6 +897,7 @@ void enc1_stream_encoder_stop_dp_info_packets(
 }
 void enc1_stream_encoder_dp_blank(
+	struct dc_link *link,
 	struct stream_encoder *enc)
 {
 	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
@@ -914,6 +928,8 @@ void enc1_stream_encoder_dp_blank(
 	/* disable DP stream */
 	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
 
+	dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_DP_VID_STREAM);
+
 	/* the encoder stops sending the video stream
 	 * at the start of the vertical blanking.
 	 * Poll for DP_VID_STREAM_STATUS == 0
@@ -930,10 +946,13 @@ void enc1_stream_encoder_dp_blank(
 	 */
 
 	REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true);
+
+	dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_FIFO_STEER_RESET);
 }
 
 /* output video stream to link encoder */
 void enc1_stream_encoder_dp_unblank(
+	struct dc_link *link,
 	struct stream_encoder *enc,
 	const struct encoder_unblank_param *param)
 {
@@ -1000,6 +1019,8 @@ void enc1_stream_encoder_dp_unblank(
 	 */
 
 	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
+
+	dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
 }
 
 void enc1_stream_encoder_set_avmute(
@@ -1381,6 +1402,11 @@ static void enc1_se_disable_dp_audio(
 	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 	uint32_t value = 0;
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (enc->afmt && enc->afmt->funcs->afmt_powerdown)
+		enc->afmt->funcs->afmt_powerdown(enc->afmt);
+#endif
+
 	/* Disable Audio packets */
 	REG_UPDATE_5(DP_SEC_CNTL,
 			DP_SEC_ASP_ENABLE, 0,
@@ -1444,6 +1470,10 @@ void enc1_se_hdmi_audio_setup(
 void enc1_se_hdmi_audio_disable(
 	struct stream_encoder *enc)
 {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (enc->afmt && enc->afmt->funcs->afmt_powerdown)
+		enc->afmt->funcs->afmt_powerdown(enc->afmt);
+#endif
+
 	enc1_se_enable_audio_clock(enc, false);
 }

View File

@@ -627,9 +627,11 @@ void enc1_stream_encoder_stop_dp_info_packets(
 	struct stream_encoder *enc);
 
 void enc1_stream_encoder_dp_blank(
+	struct dc_link *link,
 	struct stream_encoder *enc);
 
 void enc1_stream_encoder_dp_unblank(
+	struct dc_link *link,
 	struct stream_encoder *enc,
 	const struct encoder_unblank_param *param);

View File

@@ -52,6 +52,9 @@
 #include "dc_dmub_srv.h"
 #include "dce/dmub_hw_lock_mgr.h"
 #include "hw_sequencer.h"
+#include "inc/link_dpcd.h"
+#include "dpcd_defs.h"
+#include "inc/link_enc_cfg.h"
 
 #define DC_LOGGER_INIT(logger)
@@ -2135,12 +2138,17 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
 	params.link_settings.link_rate = link_settings->link_rate;
 
-	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
+	if (is_dp_128b_132b_signal(pipe_ctx)) {
+		/* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
+		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
+				pipe_ctx->stream_res.hpo_dp_stream_enc,
+				pipe_ctx->stream_res.tg->inst);
+	} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
 		if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1)
 			params.timing.pix_clk_100hz /= 2;
 		pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine(
 				pipe_ctx->stream_res.stream_enc, params.opp_cnt > 1);
-		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
+		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
 	}
 
 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
@@ -2374,14 +2382,36 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
 	uint32_t active_total_with_borders;
 	uint32_t early_control = 0;
 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
+	struct link_encoder *link_enc;
+
+	if (link->is_dig_mapping_flexible &&
+			link->dc->res_pool->funcs->link_encs_assign)
+		link_enc = link_enc_cfg_get_link_enc_used_by_stream(link->ctx->dc, pipe_ctx->stream);
+	else
+		link_enc = link->link_enc;
+	ASSERT(link_enc);
 
 	/* For MST, there are multiply stream go to only one link.
 	 * connect DIG back_end to front_end while enable_stream and
 	 * disconnect them during disable_stream
 	 * BY this, it is logic clean to separate stream and link
 	 */
-	link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
-						pipe_ctx->stream_res.stream_enc->id, true);
+	if (is_dp_128b_132b_signal(pipe_ctx)) {
+		setup_dp_hpo_stream(pipe_ctx, true);
+		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->enable_stream(
+				pipe_ctx->stream_res.hpo_dp_stream_enc);
+		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->map_stream_to_link(
+				pipe_ctx->stream_res.hpo_dp_stream_enc,
+				pipe_ctx->stream_res.hpo_dp_stream_enc->inst,
+				link->hpo_dp_link_enc->inst);
+	}
+
+	if (!is_dp_128b_132b_signal(pipe_ctx) && link_enc)
+		link_enc->funcs->connect_dig_be_to_fe(
+			link_enc, pipe_ctx->stream_res.stream_enc->id, true);
+
+	if (dc_is_dp_signal(pipe_ctx->stream->signal))
+		dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE);
 
 	if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
 		if (link->dc->hwss.program_dmdata_engine)
@@ -2390,6 +2420,9 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
 
 	link->dc->hwss.update_info_frame(pipe_ctx);
 
+	if (dc_is_dp_signal(pipe_ctx->stream->signal))
+		dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
+
 	/* enable early control to avoid corruption on DP monitor*/
 	active_total_with_borders =
 			timing->h_addressable
@@ -2406,7 +2439,9 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
 
 	/* enable audio only within mode set */
 	if (pipe_ctx->stream_res.audio != NULL) {
-		if (dc_is_dp_signal(pipe_ctx->stream->signal))
+		if (is_dp_128b_132b_signal(pipe_ctx))
+			pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.hpo_dp_stream_enc);
+		else if (dc_is_dp_signal(pipe_ctx->stream->signal))
 			pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc);
 	}
 }
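
For 128b/132b signals, the DIG backend-to-frontend wiring above is replaced by an explicit stream-to-link mapping on the HPO encoders, which is what lets several streams share one link encoder. A toy mapping table, with invented names and sizes:

#include <stdio.h>

#define MAX_STREAMS 4

/* Toy mapping table: stream encoder instance -> link encoder instance. */
struct hpo_map { int link_inst[MAX_STREAMS]; };

static void map_stream_to_link(struct hpo_map *m, int stream_inst, int link_inst)
{
	m->link_inst[stream_inst] = link_inst;
	printf("stream enc %d -> hpo link enc %d\n", stream_inst, link_inst);
}

int main(void)
{
	struct hpo_map m = { { -1, -1, -1, -1 } };

	/* Two MST-style streams time-share one 128b/132b link encoder. */
	map_stream_to_link(&m, 0, 0);
	map_stream_to_link(&m, 1, 0);
	return 0;
}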

View File

@@ -63,6 +63,7 @@
 #include "dcn20_dccg.h"
 #include "dcn20_vmid.h"
 #include "dc_link_ddc.h"
+#include "dc_link_dp.h"
 #include "dce/dce_panel_cntl.h"
 
 #include "navi10_ip_offset.h"
@@ -86,6 +87,7 @@
 #include "dce/dce_aux.h"
 #include "dce/dce_i2c.h"
 #include "vm_helper.h"
+#include "link_enc_cfg.h"
 
 #include "amdgpu_socbb.h"
@@ -1595,15 +1597,32 @@ static void get_pixel_clock_parameters(
 	const struct dc_stream_state *stream = pipe_ctx->stream;
 	struct pipe_ctx *odm_pipe;
 	int opp_cnt = 1;
+	struct dc_link *link = stream->link;
+	struct link_encoder *link_enc = NULL;
 
 	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
 		opp_cnt++;
 
 	pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
-	pixel_clk_params->encoder_object_id = stream->link->link_enc->id;
+
+	/* Links supporting dynamically assigned link encoder will be assigned next
+	 * available encoder if one not already assigned.
+	 */
+	if (link->is_dig_mapping_flexible &&
+			link->dc->res_pool->funcs->link_encs_assign) {
+		link_enc = link_enc_cfg_get_link_enc_used_by_stream(stream->ctx->dc, stream);
+		if (link_enc == NULL)
+			link_enc = link_enc_cfg_get_next_avail_link_enc(stream->ctx->dc);
+	} else
+		link_enc = stream->link->link_enc;
+	ASSERT(link_enc);
+
+	if (link_enc)
+		pixel_clk_params->encoder_object_id = link_enc->id;
 	pixel_clk_params->signal_type = pipe_ctx->stream->signal;
 	pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
 	/* TODO: un-hardcode*/
+	/* TODO - DP2.0 HW: calculate requested_sym_clk for UHBR rates */
 	pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
 		LINK_RATE_REF_FREQ_IN_KHZ;
 	pixel_clk_params->flags.ENABLE_SS = 0;
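
Compared with the disable path earlier in the series, the lookup here adds a second fallback: if no encoder has been assigned to the stream yet, the next available one is picked so a valid encoder_object_id can still be produced. A sketch of that two-step fallback, with an invented pool standing in for the DC resource pool:

#include <stddef.h>
#include <stdio.h>

#define POOL_SIZE 3

struct enc_sketch { int id; int in_use; };

static struct enc_sketch pool[POOL_SIZE] = { {100, 1}, {101, 0}, {102, 0} };

/* Assigned-encoder lookup stub: nothing assigned yet in this example. */
static struct enc_sketch *lookup_assigned(void) { return NULL; }

static struct enc_sketch *next_available(void)
{
	int i;

	for (i = 0; i < POOL_SIZE; i++)
		if (!pool[i].in_use)
			return &pool[i];
	return NULL;
}

int main(void)
{
	struct enc_sketch *enc = lookup_assigned();

	if (enc == NULL)
		enc = next_available();
	if (enc)
		printf("encoder_object_id = %d\n", enc->id); /* 101 */
	return 0;
}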
@@ -1854,7 +1873,9 @@ static void swizzle_to_dml_params(
 	case DC_SW_VAR_D_X:
 		*sw_mode = dm_sw_var_d_x;
 		break;
+	case DC_SW_VAR_R_X:
+		*sw_mode = dm_sw_var_r_x;
+		break;
 	default:
 		ASSERT(0); /* Not supported */
 		break;
@@ -3044,6 +3065,8 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		if (!context->res_ctx.pipe_ctx[i].stream)
 			continue;
+		if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
+			return true;
 	}
 	return false;
 }
@@ -3152,7 +3175,7 @@ void dcn20_calculate_dlg_params(
 		context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml,
 				&context->res_ctx.pipe_ctx[i].rq_regs,
-				pipes[pipe_idx].pipe);
+				&pipes[pipe_idx].pipe);
 		pipe_idx++;
 	}
 }

View File

@@ -29,6 +29,8 @@
 #include "dcn20_stream_encoder.h"
 #include "reg_helper.h"
 #include "hw_shared.h"
+#include "inc/link_dpcd.h"
+#include "dpcd_defs.h"
 
 #define DC_LOGGER \
 		enc1->base.ctx->logger
@@ -290,7 +292,8 @@ static void enc2_dp_set_dsc_config(struct stream_encoder *enc,
 
 static void enc2_dp_set_dsc_pps_info_packet(struct stream_encoder *enc,
 					bool enable,
-					uint8_t *dsc_packed_pps)
+					uint8_t *dsc_packed_pps,
+					bool immediate_update)
 {
 	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
@@ -444,6 +447,7 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
 }
 
 void enc2_stream_encoder_dp_unblank(
+		struct dc_link *link,
 		struct stream_encoder *enc,
 		const struct encoder_unblank_param *param)
 {
@@ -522,6 +526,8 @@ void enc2_stream_encoder_dp_unblank(
 	 */
 
 	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
+
+	dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
 }
 
 static void enc2_dp_set_odm_combine(

View File

@@ -104,6 +104,7 @@ void enc2_stream_encoder_dp_set_stream_attribute(
 	uint32_t enable_sdp_splitting);
 
 void enc2_stream_encoder_dp_unblank(
+	struct dc_link *link,
 	struct stream_encoder *enc,
 	const struct encoder_unblank_param *param);

View File

@@ -44,11 +44,14 @@
 	afmt3->base.ctx
 
-static void afmt3_setup_hdmi_audio(
+void afmt3_setup_hdmi_audio(
 	struct afmt *afmt)
 {
 	struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt);
 
+	if (afmt->funcs->afmt_poweron)
+		afmt->funcs->afmt_poweron(afmt);
+
 	/* AFMT_AUDIO_PACKET_CONTROL */
 	REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
@@ -113,7 +116,7 @@ static union audio_cea_channels speakers_to_channels(
 	return cea_channels;
 }
 
-static void afmt3_se_audio_setup(
+void afmt3_se_audio_setup(
 	struct afmt *afmt,
 	unsigned int az_inst,
 	struct audio_info *audio_info)
@@ -138,20 +141,24 @@ static void afmt3_se_audio_setup(
 	REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, channels);
 
 	/* Disable forced mem power off */
-	REG_UPDATE(AFMT_MEM_PWR, AFMT_MEM_PWR_FORCE, 0);
+	if (afmt->funcs->afmt_poweron == NULL)
+		REG_UPDATE(AFMT_MEM_PWR, AFMT_MEM_PWR_FORCE, 0);
 }
 
-static void afmt3_audio_mute_control(
+void afmt3_audio_mute_control(
 	struct afmt *afmt,
 	bool mute)
 {
 	struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt);
 
+	if (mute && afmt->funcs->afmt_powerdown)
+		afmt->funcs->afmt_powerdown(afmt);
+	if (!mute && afmt->funcs->afmt_poweron)
+		afmt->funcs->afmt_poweron(afmt);
+
 	/* enable/disable transmission of audio packets */
 	REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, !mute);
 }
 
-static void afmt3_audio_info_immediate_update(
+void afmt3_audio_info_immediate_update(
 	struct afmt *afmt)
 {
 	struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt);
@@ -160,11 +167,14 @@ static void afmt3_audio_info_immediate_update(
 	REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
 }
 
-static void afmt3_setup_dp_audio(
+void afmt3_setup_dp_audio(
 	struct afmt *afmt)
 {
 	struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt);
 
+	if (afmt->funcs->afmt_poweron)
+		afmt->funcs->afmt_poweron(afmt);
+
 	/* AFMT_AUDIO_PACKET_CONTROL */
 	REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
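
The AFMT changes above tie the audio formatter's power state to its use: power on before programming or unmuting, power down on mute, and skip the legacy forced memory power-off register when power gating hooks exist. The ordering is the point, and it is easy to show in stand-in form:

#include <stdbool.h>
#include <stdio.h>

struct afmt_sketch { bool powered; };

static void poweron(struct afmt_sketch *a)   { a->powered = true;  puts("afmt on"); }
static void powerdown(struct afmt_sketch *a) { a->powered = false; puts("afmt off"); }

/* Mute gates the block off; unmute powers it back up before unmuting. */
static void mute_control(struct afmt_sketch *a, bool mute)
{
	if (mute)
		powerdown(a);
	else
		poweron(a);
	printf("sample send: %d\n", !mute);
}

int main(void)
{
	struct afmt_sketch a = { false };

	mute_control(&a, false);
	mute_control(&a, true);
	return 0;
}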

Some files were not shown because too many files have changed in this diff.