drm fixes for 6.1-rc7
amdgpu:
- amdgpu gang submit fix
- DCN 3.1.4 fixes
- DP MST DSC deadlock fixes
- HMM userptr fixes
- Fix Aldebaran CU occupancy reporting
- GFX11 fixes
- PSP suspend/resume fix
- DCE12 KASAN fix
- DCN 3.2.x fixes
- Rotated cursor fix
- SMU 13.x fix
- DELL platform suspend/resume fixes
- VCN4 SR-IOV fix
- Display regression fix for polled connectors

i915:
- Fix GVT KVM reference count handling
- Never purge busy TTM objects
- Fix warn in intel_display_power_*_domain() functions

dma-buf:
- Use dma_fence_unwrap_for_each when importing sync files
- Fix race in dma_heap_add()

fbcon:
- Fix use of uninitialized memory in logo

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmOANPQACgkQDHTzWXnE
hr4a/A/+JuYIIC0ZtGeY3mcmHIcNQIIeCDmf+/8BGZCp6Dr2Uz62jBhyylbrZ9MQ
lHHmV3HI1khC3S5v351zeEzFCfK9JKWs64I3sBuBF04yi49PvCD5+8MpgUPRgPWb
N6t319GlIw1X0ktA2tgzekGVJCSaCyZNUD4kvO9PwrLFTpVeqEoCUoYwmqLCt392
u0r9FYrAQLmH208/6DG5PnKHLXO5/VspJT09xz3faZdVAJtLc6Pr+npO2kUsx8Y0
quGtdh0OxhdsExhXWZGSm3DDEMmxqC0e14QX0dXuzOkTfIYRkUzkXLjph99nMEir
rpszx0wJrdjbWiL98nXi520KlcNZeeCM6s1WbXXBplgJHAhEXrcBuvNEH9s/SedL
qRBdqKtmxpeJkkimAfpqPvKkSmwyfNIZ01Yekrtm9ovmD2T6uO3zjX5411d4iHJ7
JiSwkcnaqR75Tbvq7aoOubQUB3S4JXIqqxuVgQicu7H/wcuImbcaaaaWmxPwIYRi
tn0Dwt+GY84bYi47g9DzcslpxFXV36c5aQv2HwdDfUu8/oqOZakgzYiZoxPIlT5K
y5TFvInNEhuNExezVRzbxp5asgPy3izEaUrrQIFJ8XchP4N6jr+17x174ix4O019
T4H9PhNybHGSVGWAkuy9+KTIOGbP5vbY1zzmEGjPlkKmv0BFDfM=
=o133
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2022-11-25' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Weekly fixes, amdgpu has not quite settled down. Most of the changes
  are small, and the non-amdgpu ones are all fine. There are a bunch of
  DP MST DSC fixes that fix some issues introduced in a previous larger
  MST rework. The biggest one is mainly propagating some error values
  properly instead of bool returns, and I think it just looks large but
  doesn't really change anything too much, except propagating errors
  that are required to avoid deadlocks. I've gone over it and a few
  others and they've had some decent testing over the last few weeks.
  Summary:

  amdgpu:
   - amdgpu gang submit fix
   - DCN 3.1.4 fixes
   - DP MST DSC deadlock fixes
   - HMM userptr fixes
   - Fix Aldebaran CU occupancy reporting
   - GFX11 fixes
   - PSP suspend/resume fix
   - DCE12 KASAN fix
   - DCN 3.2.x fixes
   - Rotated cursor fix
   - SMU 13.x fix
   - DELL platform suspend/resume fixes
   - VCN4 SR-IOV fix
   - Display regression fix for polled connectors

  i915:
   - Fix GVT KVM reference count handling
   - Never purge busy TTM objects
   - Fix warn in intel_display_power_*_domain() functions

  dma-buf:
   - Use dma_fence_unwrap_for_each when importing sync files
   - Fix race in dma_heap_add()

  fbcon:
   - Fix use of uninitialized memory in logo"

* tag 'drm-fixes-2022-11-25' of git://anongit.freedesktop.org/drm/drm: (30 commits)
  drm/amdgpu/vcn: re-use original vcn0 doorbell value
  drm/amdgpu: Partially revert "drm/amdgpu: update drm_display_info correctly when the edid is read"
  drm/amd/display: No display after resume from WB/CB
  drm/amdgpu: fix use-after-free during gpu recovery
  drm/amd/pm: update driver if header for smu_13_0_7
  drm/amd/display: Fix rotated cursor offset calculation
  drm/amd/display: Use new num clk levels struct for max mclk index
  drm/amd/display: Avoid setting pixel rate divider to N/A
  drm/amd/display: Use viewport height for subvp mall allocation size
  drm/amd/display: Update soc bounding box for dcn32/dcn321
  drm/amd/dc/dce120: Fix audio register mapping, stop triggering KASAN
  drm/amdgpu/psp: don't free PSP buffers on suspend
  fbcon: Use kzalloc() in fbcon_prepare_logo()
  dma-buf: fix racing conflict of dma_heap_add()
  drm/amd/amdgpu: reserve vm invalidation engine for firmware
  drm/amdgpu: Enable Aldebaran devices to report CU Occupancy
  drm/amdgpu: fix userptr HMM range handling v2
  drm/amdgpu: always register an MMU notifier for userptr
  drm/amdgpu/dm/mst: Fix uninitialized var in pre_compute_mst_dsc_configs_for_state()
  drm/amdgpu/dm/dp_mst: Don't grab mst_mgr->lock when computing DSC state
  ...
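As context for the "bool returns" remark above: a minimal sketch of the error-propagation pattern the DSC fixes apply (hypothetical names, not code from this series). Returning an int lets -EDEADLK from the atomic helpers reach the caller, where a bool return would collapse it into a generic failure:

	/* hypothetical helper illustrating int error codes vs. bool */
	static int check_and_apply(struct drm_atomic_state *state)
	{
		int ret;

		ret = do_atomic_check(state);	/* assumed: may return -EDEADLK */
		if (ret < 0)
			return ret;		/* propagate the real error */
		return 0;			/* success */
	}

A caller that sees -EDEADLK can drop locks and retry; a plain false could not be told apart from a genuine failure and had to be remapped to -EINVAL.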
commit 6fe0e074e7
@@ -15,6 +15,7 @@
 #include <linux/slab.h>
 #include <linux/dma-buf.h>
 #include <linux/dma-fence.h>
+#include <linux/dma-fence-unwrap.h>
 #include <linux/anon_inodes.h>
 #include <linux/export.h>
 #include <linux/debugfs.h>
@@ -391,8 +392,10 @@ static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
 				     const void __user *user_data)
 {
 	struct dma_buf_import_sync_file arg;
-	struct dma_fence *fence;
+	struct dma_fence *fence, *f;
 	enum dma_resv_usage usage;
+	struct dma_fence_unwrap iter;
+	unsigned int num_fences;
 	int ret = 0;
 
 	if (copy_from_user(&arg, user_data, sizeof(arg)))
@@ -411,13 +414,21 @@ static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
 	usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
 						   DMA_RESV_USAGE_READ;
 
-	dma_resv_lock(dmabuf->resv, NULL);
+	num_fences = 0;
+	dma_fence_unwrap_for_each(f, &iter, fence)
+		++num_fences;
 
-	ret = dma_resv_reserve_fences(dmabuf->resv, 1);
-	if (!ret)
-		dma_resv_add_fence(dmabuf->resv, fence, usage);
+	if (num_fences > 0) {
+		dma_resv_lock(dmabuf->resv, NULL);
 
-	dma_resv_unlock(dmabuf->resv);
+		ret = dma_resv_reserve_fences(dmabuf->resv, num_fences);
+		if (!ret) {
+			dma_fence_unwrap_for_each(f, &iter, fence)
+				dma_resv_add_fence(dmabuf->resv, f, usage);
+		}
+
+		dma_resv_unlock(dmabuf->resv);
+	}
 
 	dma_fence_put(fence);
@@ -233,18 +233,6 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
 		return ERR_PTR(-EINVAL);
 	}
 
-	/* check the name is unique */
-	mutex_lock(&heap_list_lock);
-	list_for_each_entry(h, &heap_list, list) {
-		if (!strcmp(h->name, exp_info->name)) {
-			mutex_unlock(&heap_list_lock);
-			pr_err("dma_heap: Already registered heap named %s\n",
-			       exp_info->name);
-			return ERR_PTR(-EINVAL);
-		}
-	}
-	mutex_unlock(&heap_list_lock);
-
 	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
 	if (!heap)
 		return ERR_PTR(-ENOMEM);
@@ -283,13 +271,27 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
 		err_ret = ERR_CAST(dev_ret);
 		goto err2;
 	}
-	/* Add heap to the list */
+
 	mutex_lock(&heap_list_lock);
+	/* check the name is unique */
+	list_for_each_entry(h, &heap_list, list) {
+		if (!strcmp(h->name, exp_info->name)) {
+			mutex_unlock(&heap_list_lock);
+			pr_err("dma_heap: Already registered heap named %s\n",
+			       exp_info->name);
+			err_ret = ERR_PTR(-EINVAL);
+			goto err3;
+		}
+	}
+
+	/* Add heap to the list */
 	list_add(&heap->list, &heap_list);
 	mutex_unlock(&heap_list_lock);
 
 	return heap;
 
+err3:
+	device_destroy(dma_heap_class, heap->heap_devt);
 err2:
 	cdev_del(&heap->heap_cdev);
 err1:
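The dma_heap_add() fix above is the classic check-then-insert race: the name-uniqueness walk and the list_add() originally ran under two separate lock acquisitions, so two heaps registering the same name concurrently could both pass the check. A minimal user-space sketch of the corrected pattern, with generic names (not the kernel API):

	#include <pthread.h>
	#include <string.h>

	struct node { const char *name; struct node *next; };
	static struct node *head;
	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

	/* duplicate check and insertion share one critical section */
	static int register_node(struct node *n)
	{
		struct node *it;
		int ret = 0;

		pthread_mutex_lock(&list_lock);
		for (it = head; it; it = it->next) {
			if (!strcmp(it->name, n->name)) {
				ret = -1;	/* name already taken */
				goto out;
			}
		}
		n->next = head;		/* insert before anyone else can re-check */
		head = n;
	out:
		pthread_mutex_unlock(&list_lock);
		return ret;
	}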
@@ -41,5 +41,6 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = {
 	.get_atc_vmid_pasid_mapping_info =
 				kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
 	.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
+	.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
 	.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings
 };
@@ -986,6 +986,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
 	struct amdkfd_process_info *process_info = mem->process_info;
 	struct amdgpu_bo *bo = mem->bo;
 	struct ttm_operation_ctx ctx = { true, false };
+	struct hmm_range *range;
 	int ret = 0;
 
 	mutex_lock(&process_info->lock);
@@ -1015,7 +1016,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
 		return 0;
 	}
 
-	ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
+	ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
 	if (ret) {
 		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
 		goto unregister_out;
@@ -1033,7 +1034,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
 	amdgpu_bo_unreserve(bo);
 
 release_out:
-	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
 unregister_out:
 	if (ret)
 		amdgpu_mn_unregister(bo);
@@ -2370,6 +2371,8 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
 	/* Go through userptr_inval_list and update any invalid user_pages */
 	list_for_each_entry(mem, &process_info->userptr_inval_list,
 			    validate_list.head) {
+		struct hmm_range *range;
+
 		invalid = atomic_read(&mem->invalid);
 		if (!invalid)
 			/* BO hasn't been invalidated since the last
@@ -2380,7 +2383,8 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
 		bo = mem->bo;
 
 		/* Get updated user pages */
-		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
+		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
+						   &range);
 		if (ret) {
 			pr_debug("Failed %d to get user pages\n", ret);
 
@@ -2399,7 +2403,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
 		 * FIXME: Cannot ignore the return code, must hold
 		 * notifier_lock
 		 */
-		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
 		}
 
 		/* Mark the BO as valid unless it was invalidated
@@ -209,6 +209,7 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
 		list_add_tail(&e->tv.head, &bucket[priority]);
 
 		e->user_pages = NULL;
+		e->range = NULL;
 	}
 
 	/* Connect the sorted buckets in the output list. */
@@ -26,6 +26,8 @@
 #include <drm/ttm/ttm_execbuf_util.h>
 #include <drm/amdgpu_drm.h>
 
+struct hmm_range;
+
 struct amdgpu_device;
 struct amdgpu_bo;
 struct amdgpu_bo_va;
@@ -36,6 +38,7 @@ struct amdgpu_bo_list_entry {
 	struct amdgpu_bo_va	*bo_va;
 	uint32_t		priority;
 	struct page		**user_pages;
+	struct hmm_range	*range;
 	bool			user_invalidated;
 };
@@ -328,7 +328,6 @@ static void amdgpu_connector_free_edid(struct drm_connector *connector)
 
 	kfree(amdgpu_connector->edid);
 	amdgpu_connector->edid = NULL;
-	drm_connector_update_edid_property(connector, NULL);
 }
 
 static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
@@ -913,7 +913,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 			goto out_free_user_pages;
 		}
 
-		r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
+		r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range);
 		if (r) {
 			kvfree(e->user_pages);
 			e->user_pages = NULL;
@@ -991,9 +991,10 @@ out_free_user_pages:
 
 		if (!e->user_pages)
 			continue;
-		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
 		kvfree(e->user_pages);
 		e->user_pages = NULL;
+		e->range = NULL;
 	}
 	mutex_unlock(&p->bo_list->bo_list_mutex);
 	return r;
@@ -1273,7 +1274,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 
-		r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+		r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
+		e->range = NULL;
 	}
 	if (r) {
 		r = -EAGAIN;
@@ -378,6 +378,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct drm_amdgpu_gem_userptr *args = data;
 	struct drm_gem_object *gobj;
+	struct hmm_range *range;
 	struct amdgpu_bo *bo;
 	uint32_t handle;
 	int r;
@@ -413,14 +414,13 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 	if (r)
 		goto release_object;
 
-	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
-		r = amdgpu_mn_register(bo, args->addr);
-		if (r)
-			goto release_object;
-	}
+	r = amdgpu_mn_register(bo, args->addr);
+	if (r)
+		goto release_object;
 
 	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
-		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
+		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
+						 &range);
 		if (r)
 			goto release_object;
 
@@ -443,7 +443,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 
 user_pages_done:
 	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
-		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
 
 release_object:
 	drm_gem_object_put(gobj);
@@ -479,6 +479,12 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
 	unsigned i;
 	unsigned vmhub, inv_eng;
 
+	if (adev->enable_mes) {
+		/* reserve engine 5 for firmware */
+		for (vmhub = 0; vmhub < AMDGPU_MAX_VMHUBS; vmhub++)
+			vm_inv_engs[vmhub] &= ~(1 << 5);
+	}
+
 	for (i = 0; i < adev->num_rings; ++i) {
 		ring = adev->rings[i];
 		vmhub = ring->funcs->vmhub;
@@ -169,7 +169,11 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
 	amdgpu_sync_free(&job->sync);
 	amdgpu_sync_free(&job->sched_sync);
 
-	dma_fence_put(&job->hw_fence);
+	/* only put the hw fence if has embedded fence */
+	if (!job->hw_fence.ops)
+		kfree(job);
+	else
+		dma_fence_put(&job->hw_fence);
 }
 
 void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
@@ -254,6 +258,9 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 			DRM_ERROR("Error adding fence (%d)\n", r);
 	}
 
+	if (!fence && job->gang_submit)
+		fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
+
 	while (fence == NULL && vm && !job->vmid) {
 		r = amdgpu_vmid_grab(vm, ring, &job->sync,
 				     &job->base.s_fence->finished,
@@ -264,9 +271,6 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 		fence = amdgpu_sync_get_fence(&job->sync);
 	}
 
-	if (!fence && job->gang_submit)
-		fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
-
 	return fence;
 }
 
@@ -172,6 +172,7 @@ void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
 {
 	amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
 			      &mem_ctx->shared_buf);
+	mem_ctx->shared_bo = NULL;
 }
 
 static void psp_free_shared_bufs(struct psp_context *psp)
@@ -182,6 +183,7 @@ static void psp_free_shared_bufs(struct psp_context *psp)
 	/* free TMR memory buffer */
 	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
 	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
+	psp->tmr_bo = NULL;
 
 	/* free xgmi shared memory */
 	psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
@@ -743,7 +745,7 @@ static int psp_load_toc(struct psp_context *psp,
 /* Set up Trusted Memory Region */
 static int psp_tmr_init(struct psp_context *psp)
 {
-	int ret;
+	int ret = 0;
 	int tmr_size;
 	void *tmr_buf;
 	void **pptr;
@@ -770,10 +772,12 @@ static int psp_tmr_init(struct psp_context *psp)
 		}
 	}
 
-	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
-	ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT,
-				      AMDGPU_GEM_DOMAIN_VRAM,
-				      &psp->tmr_bo, &psp->tmr_mc_addr, pptr);
+	if (!psp->tmr_bo) {
+		pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
+		ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT,
+					      AMDGPU_GEM_DOMAIN_VRAM,
+					      &psp->tmr_bo, &psp->tmr_mc_addr, pptr);
+	}
 
 	return ret;
 }
@@ -2732,8 +2736,6 @@ static int psp_suspend(void *handle)
 	}
 
 out:
-	psp_free_shared_bufs(psp);
-
 	return ret;
 }
 
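The PSP change above makes suspend/resume idempotent: buffers are NULLed when freed, the shared buffers are no longer torn down in psp_suspend(), and psp_tmr_init() only allocates when no buffer survived. A generic sketch of that alloc-if-absent pattern (hypothetical names, not the PSP API):

	#include <stdlib.h>

	struct ctx { void *buf; };

	static void ctx_free_buf(struct ctx *c)
	{
		free(c->buf);
		c->buf = NULL;		/* mark freed so a later init is safe */
	}

	static int ctx_init_buf(struct ctx *c, size_t size)
	{
		if (c->buf)		/* buffer kept across suspend: reuse it */
			return 0;
		c->buf = malloc(size);
		return c->buf ? 0 : -1;
	}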
@@ -643,9 +643,6 @@ struct amdgpu_ttm_tt {
 	struct task_struct	*usertask;
 	uint32_t		userflags;
 	bool			bound;
-#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
-	struct hmm_range	*range;
-#endif
 };
 
 #define ttm_to_amdgpu_ttm_tt(ptr)	container_of(ptr, struct amdgpu_ttm_tt, ttm)
@@ -658,7 +655,8 @@ struct amdgpu_ttm_tt {
 * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
 * once afterwards to stop HMM tracking
 */
-int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
+int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
+				 struct hmm_range **range)
 {
 	struct ttm_tt *ttm = bo->tbo.ttm;
 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
@@ -668,16 +666,15 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
 	bool readonly;
 	int r = 0;
 
+	/* Make sure get_user_pages_done() can cleanup gracefully */
+	*range = NULL;
+
 	mm = bo->notifier.mm;
 	if (unlikely(!mm)) {
 		DRM_DEBUG_DRIVER("BO is not registered?\n");
 		return -EFAULT;
 	}
 
-	/* Another get_user_pages is running at the same time?? */
-	if (WARN_ON(gtt->range))
-		return -EFAULT;
-
 	if (!mmget_not_zero(mm)) /* Happens during process shutdown */
 		return -ESRCH;
 
@@ -695,7 +692,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
 
 	readonly = amdgpu_ttm_tt_is_readonly(ttm);
 	r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start,
-				       ttm->num_pages, &gtt->range, readonly,
+				       ttm->num_pages, range, readonly,
 				       true, NULL);
 out_unlock:
 	mmap_read_unlock(mm);
@@ -713,30 +710,24 @@ out_unlock:
  *
  * Returns: true if pages are still valid
  */
-bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
+bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
+				       struct hmm_range *range)
 {
 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
-	bool r = false;
 
-	if (!gtt || !gtt->userptr)
+	if (!gtt || !gtt->userptr || !range)
 		return false;
 
 	DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
 			 gtt->userptr, ttm->num_pages);
 
-	WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
-		  "No user pages to check\n");
+	WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");
 
-	if (gtt->range) {
-		/*
-		 * FIXME: Must always hold notifier_lock for this, and must
-		 * not ignore the return code.
-		 */
-		r = amdgpu_hmm_range_get_pages_done(gtt->range);
-		gtt->range = NULL;
-	}
-
-	return !r;
+	/*
+	 * FIXME: Must always hold notifier_lock for this, and must
+	 * not ignore the return code.
+	 */
+	return !amdgpu_hmm_range_get_pages_done(range);
 }
 #endif
 
@@ -813,20 +804,6 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
 	/* unmap the pages mapped to the device */
 	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
 	sg_free_table(ttm->sg);
-
-#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
-	if (gtt->range) {
-		unsigned long i;
-
-		for (i = 0; i < ttm->num_pages; i++) {
-			if (ttm->pages[i] !=
-			    hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
-				break;
-		}
-
-		WARN((i == ttm->num_pages), "Missing get_user_page_done\n");
-	}
-#endif
 }
 
 static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
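The userptr rework above changes ownership of the HMM tracking state: instead of stashing a single hmm_range in the shared ttm_tt (where a second concurrent get_user_pages tripped the WARN_ON), each call now hands the range back to its caller, who later passes it to the done() function. A rough sketch of that handle-ownership shape, with hypothetical names:

	#include <stdbool.h>

	struct range { int valid; };	/* stand-in for struct hmm_range */
	struct obj { int id; };		/* shared object: no range member */

	/* caller receives and owns *range; *range stays NULL on failure */
	static int get_user_pages(struct obj *o, struct range **range)
	{
		*range = NULL;
		/* ... on success, allocate and fill *range ... */
		return 0;
	}

	/* caller passes its own range back; NULL means nothing to check */
	static bool get_user_pages_done(struct obj *o, struct range *range)
	{
		if (!range)
			return false;
		return range->valid != 0;
	}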
@@ -39,6 +39,8 @@
 
 #define AMDGPU_POISON	0xd0bed0be
 
+struct hmm_range;
+
 struct amdgpu_gtt_mgr {
 	struct ttm_resource_manager manager;
 	struct drm_mm mm;
@@ -149,15 +151,19 @@ void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
 uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
 
 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
-int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages);
-bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm);
+int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
+				 struct hmm_range **range);
+bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
+				       struct hmm_range *range);
 #else
 static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
-					       struct page **pages)
+					       struct page **pages,
+					       struct hmm_range **range)
 {
 	return -EPERM;
 }
-static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
+static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
+						     struct hmm_range *range)
 {
 	return false;
 }
@@ -32,7 +32,6 @@
 
 #define RB_ENABLED (1 << 0)
 #define RB4_ENABLED (1 << 1)
-#define MMSCH_DOORBELL_OFFSET 0x8
 
 #define MMSCH_VF_ENGINE_STATUS__PASS 0x1
 
@@ -100,7 +100,6 @@ static int vcn_v4_0_sw_init(void *handle)
 	struct amdgpu_ring *ring;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int i, r;
-	int vcn_doorbell_index = 0;
 
 	r = amdgpu_vcn_sw_init(adev);
 	if (r)
@@ -112,12 +111,6 @@ static int vcn_v4_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	if (amdgpu_sriov_vf(adev)) {
-		vcn_doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 - MMSCH_DOORBELL_OFFSET;
-		/* get DWORD offset */
-		vcn_doorbell_index = vcn_doorbell_index << 1;
-	}
-
 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
 		volatile struct amdgpu_vcn4_fw_shared *fw_shared;
 
@@ -135,7 +128,7 @@ static int vcn_v4_0_sw_init(void *handle)
 		ring = &adev->vcn.inst[i].ring_enc[0];
 		ring->use_doorbell = true;
 		if (amdgpu_sriov_vf(adev))
-			ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1) + 1;
+			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + i * (adev->vcn.num_enc_rings + 1) + 1;
 		else
 			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;
 
@@ -1372,7 +1372,44 @@ static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
 		},
 	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
+		},
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
+		},
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
+		},
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
+		},
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
+		},
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
+		},
+	},
 	{}
 	/* TODO: refactor this from a fixed table to a dynamic option */
 };
 
 static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
@@ -6475,7 +6512,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
 	struct drm_connector_state *new_con_state;
 	struct amdgpu_dm_connector *aconnector;
 	struct dm_connector_state *dm_conn_state;
-	int i, j;
+	int i, j, ret;
 	int vcpi, pbn_div, pbn, slot_num = 0;
 
 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
@@ -6522,8 +6559,11 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
 			dm_conn_state->pbn = pbn;
 			dm_conn_state->vcpi_slots = slot_num;
 
-			drm_dp_mst_atomic_enable_dsc(state, aconnector->port, dm_conn_state->pbn,
-						     false);
+			ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->port,
+							   dm_conn_state->pbn, false);
+			if (ret < 0)
+				return ret;
+
 			continue;
 		}
 
@@ -9537,10 +9577,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 	if (dc_resource_is_dsc_encoding_supported(dc)) {
-		if (!pre_validate_dsc(state, &dm_state, vars)) {
-			ret = -EINVAL;
+		ret = pre_validate_dsc(state, &dm_state, vars);
+		if (ret != 0)
 			goto fail;
-		}
 	}
 #endif
 
@@ -9635,9 +9674,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 	}
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-	if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
+	ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
+	if (ret) {
 		DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
-		ret = -EINVAL;
 		goto fail;
 	}
 
@@ -703,13 +703,13 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
 	return dsc_config.bits_per_pixel;
 }
 
-static bool increase_dsc_bpp(struct drm_atomic_state *state,
-			     struct drm_dp_mst_topology_state *mst_state,
-			     struct dc_link *dc_link,
-			     struct dsc_mst_fairness_params *params,
-			     struct dsc_mst_fairness_vars *vars,
-			     int count,
-			     int k)
+static int increase_dsc_bpp(struct drm_atomic_state *state,
+			    struct drm_dp_mst_topology_state *mst_state,
+			    struct dc_link *dc_link,
+			    struct dsc_mst_fairness_params *params,
+			    struct dsc_mst_fairness_vars *vars,
+			    int count,
+			    int k)
 {
 	int i;
 	bool bpp_increased[MAX_PIPES];
@@ -719,6 +719,7 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state,
 	int remaining_to_increase = 0;
 	int link_timeslots_used;
 	int fair_pbn_alloc;
+	int ret = 0;
 
 	for (i = 0; i < count; i++) {
 		if (vars[i + k].dsc_enabled) {
@@ -757,52 +758,60 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state,
 
 		if (initial_slack[next_index] > fair_pbn_alloc) {
 			vars[next_index].pbn += fair_pbn_alloc;
-			if (drm_dp_atomic_find_time_slots(state,
-							  params[next_index].port->mgr,
-							  params[next_index].port,
-							  vars[next_index].pbn) < 0)
-				return false;
-			if (!drm_dp_mst_atomic_check(state)) {
+			ret = drm_dp_atomic_find_time_slots(state,
+							    params[next_index].port->mgr,
+							    params[next_index].port,
+							    vars[next_index].pbn);
+			if (ret < 0)
+				return ret;
+
+			ret = drm_dp_mst_atomic_check(state);
+			if (ret == 0) {
 				vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
 			} else {
 				vars[next_index].pbn -= fair_pbn_alloc;
-				if (drm_dp_atomic_find_time_slots(state,
-								  params[next_index].port->mgr,
-								  params[next_index].port,
-								  vars[next_index].pbn) < 0)
-					return false;
+				ret = drm_dp_atomic_find_time_slots(state,
+								    params[next_index].port->mgr,
+								    params[next_index].port,
+								    vars[next_index].pbn);
+				if (ret < 0)
+					return ret;
 			}
 		} else {
 			vars[next_index].pbn += initial_slack[next_index];
-			if (drm_dp_atomic_find_time_slots(state,
-							  params[next_index].port->mgr,
-							  params[next_index].port,
-							  vars[next_index].pbn) < 0)
-				return false;
-			if (!drm_dp_mst_atomic_check(state)) {
+			ret = drm_dp_atomic_find_time_slots(state,
+							    params[next_index].port->mgr,
+							    params[next_index].port,
+							    vars[next_index].pbn);
+			if (ret < 0)
+				return ret;
+
+			ret = drm_dp_mst_atomic_check(state);
+			if (ret == 0) {
 				vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
 			} else {
 				vars[next_index].pbn -= initial_slack[next_index];
-				if (drm_dp_atomic_find_time_slots(state,
-								  params[next_index].port->mgr,
-								  params[next_index].port,
-								  vars[next_index].pbn) < 0)
-					return false;
+				ret = drm_dp_atomic_find_time_slots(state,
+								    params[next_index].port->mgr,
+								    params[next_index].port,
+								    vars[next_index].pbn);
+				if (ret < 0)
+					return ret;
 			}
 		}
 
 		bpp_increased[next_index] = true;
 		remaining_to_increase--;
 	}
-	return true;
+	return 0;
 }
 
-static bool try_disable_dsc(struct drm_atomic_state *state,
-			    struct dc_link *dc_link,
-			    struct dsc_mst_fairness_params *params,
-			    struct dsc_mst_fairness_vars *vars,
-			    int count,
-			    int k)
+static int try_disable_dsc(struct drm_atomic_state *state,
+			   struct dc_link *dc_link,
+			   struct dsc_mst_fairness_params *params,
+			   struct dsc_mst_fairness_vars *vars,
+			   int count,
+			   int k)
 {
 	int i;
 	bool tried[MAX_PIPES];
@@ -810,6 +819,7 @@ static bool try_disable_dsc(struct drm_atomic_state *state,
 	int max_kbps_increase;
 	int next_index;
 	int remaining_to_try = 0;
+	int ret;
 
 	for (i = 0; i < count; i++) {
 		if (vars[i + k].dsc_enabled
@@ -840,49 +850,52 @@ static bool try_disable_dsc(struct drm_atomic_state *state,
 			break;
 
 		vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
-		if (drm_dp_atomic_find_time_slots(state,
-						  params[next_index].port->mgr,
-						  params[next_index].port,
-						  vars[next_index].pbn) < 0)
-			return false;
+		ret = drm_dp_atomic_find_time_slots(state,
+						    params[next_index].port->mgr,
+						    params[next_index].port,
+						    vars[next_index].pbn);
+		if (ret < 0)
+			return ret;
 
-		if (!drm_dp_mst_atomic_check(state)) {
+		ret = drm_dp_mst_atomic_check(state);
+		if (ret == 0) {
 			vars[next_index].dsc_enabled = false;
 			vars[next_index].bpp_x16 = 0;
 		} else {
 			vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
-			if (drm_dp_atomic_find_time_slots(state,
-							  params[next_index].port->mgr,
-							  params[next_index].port,
-							  vars[next_index].pbn) < 0)
-				return false;
+			ret = drm_dp_atomic_find_time_slots(state,
+							    params[next_index].port->mgr,
+							    params[next_index].port,
+							    vars[next_index].pbn);
+			if (ret < 0)
+				return ret;
 		}
 
 		tried[next_index] = true;
 		remaining_to_try--;
 	}
-	return true;
+	return 0;
 }
 
-static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
-					     struct dc_state *dc_state,
-					     struct dc_link *dc_link,
-					     struct dsc_mst_fairness_vars *vars,
-					     struct drm_dp_mst_topology_mgr *mgr,
-					     int *link_vars_start_index)
+static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+					    struct dc_state *dc_state,
+					    struct dc_link *dc_link,
+					    struct dsc_mst_fairness_vars *vars,
+					    struct drm_dp_mst_topology_mgr *mgr,
+					    int *link_vars_start_index)
 {
 	struct dc_stream_state *stream;
 	struct dsc_mst_fairness_params params[MAX_PIPES];
 	struct amdgpu_dm_connector *aconnector;
 	struct drm_dp_mst_topology_state *mst_state = drm_atomic_get_mst_topology_state(state, mgr);
 	int count = 0;
-	int i, k;
+	int i, k, ret;
 	bool debugfs_overwrite = false;
 
 	memset(params, 0, sizeof(params));
 
 	if (IS_ERR(mst_state))
-		return false;
+		return PTR_ERR(mst_state);
 
 	mst_state->pbn_div = dm_mst_get_pbn_divider(dc_link);
 #if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -933,7 +946,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 
 	if (count == 0) {
 		ASSERT(0);
-		return true;
+		return 0;
 	}
 
 	/* k is start index of vars for current phy link used by mst hub */
@@ -947,13 +960,17 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 		vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
 		vars[i + k].dsc_enabled = false;
 		vars[i + k].bpp_x16 = 0;
-		if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
-						  vars[i + k].pbn) < 0)
-			return false;
+		ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
+						    vars[i + k].pbn);
+		if (ret < 0)
+			return ret;
 	}
-	if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) {
+	ret = drm_dp_mst_atomic_check(state);
+	if (ret == 0 && !debugfs_overwrite) {
 		set_dsc_configs_from_fairness_vars(params, vars, count, k);
-		return true;
+		return 0;
+	} else if (ret != -ENOSPC) {
+		return ret;
 	}
 
 	/* Try max compression */
@@ -962,31 +979,36 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
 			vars[i + k].dsc_enabled = true;
 			vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
-			if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
-							  params[i].port, vars[i + k].pbn) < 0)
-				return false;
+			ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
+							    params[i].port, vars[i + k].pbn);
+			if (ret < 0)
+				return ret;
 		} else {
 			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
 			vars[i + k].dsc_enabled = false;
 			vars[i + k].bpp_x16 = 0;
-			if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
-							  params[i].port, vars[i + k].pbn) < 0)
-				return false;
+			ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
+							    params[i].port, vars[i + k].pbn);
+			if (ret < 0)
+				return ret;
 		}
 	}
-	if (drm_dp_mst_atomic_check(state))
-		return false;
+	ret = drm_dp_mst_atomic_check(state);
+	if (ret != 0)
+		return ret;
 
 	/* Optimize degree of compression */
-	if (!increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k))
-		return false;
+	ret = increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k);
+	if (ret < 0)
+		return ret;
 
-	if (!try_disable_dsc(state, dc_link, params, vars, count, k))
-		return false;
+	ret = try_disable_dsc(state, dc_link, params, vars, count, k);
+	if (ret < 0)
+		return ret;
 
 	set_dsc_configs_from_fairness_vars(params, vars, count, k);
 
-	return true;
+	return 0;
 }
 
 static bool is_dsc_need_re_compute(
@@ -1087,15 +1109,17 @@ static bool is_dsc_need_re_compute(
 	return is_dsc_need_re_compute;
 }
 
-bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
-				       struct dc_state *dc_state,
-				       struct dsc_mst_fairness_vars *vars)
+int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
+				      struct dc_state *dc_state,
+				      struct dsc_mst_fairness_vars *vars)
 {
 	int i, j;
 	struct dc_stream_state *stream;
 	bool computed_streams[MAX_PIPES];
 	struct amdgpu_dm_connector *aconnector;
+	struct drm_dp_mst_topology_mgr *mst_mgr;
 	int link_vars_start_index = 0;
+	int ret = 0;
 
 	for (i = 0; i < dc_state->stream_count; i++)
 		computed_streams[i] = false;
@@ -1108,7 +1132,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
 
 		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
 
-		if (!aconnector || !aconnector->dc_sink)
+		if (!aconnector || !aconnector->dc_sink || !aconnector->port)
 			continue;
 
 		if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
@@ -1118,19 +1142,16 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
 			continue;
 
 		if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
-			return false;
+			return -EINVAL;
 
 		if (!is_dsc_need_re_compute(state, dc_state, stream->link))
 			continue;
 
-		mutex_lock(&aconnector->mst_mgr.lock);
-		if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars,
-						      &aconnector->mst_mgr,
-						      &link_vars_start_index)) {
-			mutex_unlock(&aconnector->mst_mgr.lock);
-			return false;
-		}
-		mutex_unlock(&aconnector->mst_mgr.lock);
+		mst_mgr = aconnector->port->mgr;
+		ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,
+						       &link_vars_start_index);
+		if (ret != 0)
+			return ret;
 
 		for (j = 0; j < dc_state->stream_count; j++) {
 			if (dc_state->streams[j]->link == stream->link)
@@ -1143,22 +1164,23 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
 
 		if (stream->timing.flags.DSC == 1)
 			if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
-				return false;
+				return -EINVAL;
 	}
 
-	return true;
+	return ret;
 }
 
-static bool
-	pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
-					      struct dc_state *dc_state,
-					      struct dsc_mst_fairness_vars *vars)
+static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
+						 struct dc_state *dc_state,
+						 struct dsc_mst_fairness_vars *vars)
 {
 	int i, j;
 	struct dc_stream_state *stream;
 	bool computed_streams[MAX_PIPES];
 	struct amdgpu_dm_connector *aconnector;
+	struct drm_dp_mst_topology_mgr *mst_mgr;
 	int link_vars_start_index = 0;
+	int ret = 0;
 
 	for (i = 0; i < dc_state->stream_count; i++)
 		computed_streams[i] = false;
@@ -1171,7 +1193,7 @@ static bool
 
 		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
 
-		if (!aconnector || !aconnector->dc_sink)
+		if (!aconnector || !aconnector->dc_sink || !aconnector->port)
			continue;
 
 		if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
@@ -1183,14 +1205,11 @@ static bool
 		if (!is_dsc_need_re_compute(state, dc_state, stream->link))
 			continue;
 
-		mutex_lock(&aconnector->mst_mgr.lock);
-		if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars,
-						      &aconnector->mst_mgr,
-						      &link_vars_start_index)) {
-			mutex_unlock(&aconnector->mst_mgr.lock);
-			return false;
-		}
-		mutex_unlock(&aconnector->mst_mgr.lock);
+		mst_mgr = aconnector->port->mgr;
+		ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,
+						       &link_vars_start_index);
+		if (ret != 0)
+			return ret;
 
 		for (j = 0; j < dc_state->stream_count; j++) {
 			if (dc_state->streams[j]->link == stream->link)
@@ -1198,7 +1217,7 @@ static bool
 		}
 	}
 
-	return true;
+	return ret;
 }
 
 static int find_crtc_index_in_state_by_stream(struct drm_atomic_state *state,
@@ -1253,9 +1272,9 @@ static bool is_dsc_precompute_needed(struct drm_atomic_state *state)
 	return ret;
 }
 
-bool pre_validate_dsc(struct drm_atomic_state *state,
-		      struct dm_atomic_state **dm_state_ptr,
-		      struct dsc_mst_fairness_vars *vars)
+int pre_validate_dsc(struct drm_atomic_state *state,
+		     struct dm_atomic_state **dm_state_ptr,
+		     struct dsc_mst_fairness_vars *vars)
 {
 	int i;
 	struct dm_atomic_state *dm_state;
@@ -1264,11 +1283,12 @@ bool pre_validate_dsc(struct drm_atomic_state *state,
 
 	if (!is_dsc_precompute_needed(state)) {
 		DRM_INFO_ONCE("DSC precompute is not needed.\n");
-		return true;
+		return 0;
 	}
-	if (dm_atomic_get_state(state, dm_state_ptr)) {
+	ret = dm_atomic_get_state(state, dm_state_ptr);
+	if (ret != 0) {
 		DRM_INFO_ONCE("dm_atomic_get_state() failed\n");
-		return false;
+		return ret;
 	}
 	dm_state = *dm_state_ptr;
 
@@ -1280,7 +1300,7 @@ bool pre_validate_dsc(struct drm_atomic_state *state,
 
 	local_dc_state = kmemdup(dm_state->context, sizeof(struct dc_state), GFP_KERNEL);
 	if (!local_dc_state)
-		return false;
+		return -ENOMEM;
 
 	for (i = 0; i < local_dc_state->stream_count; i++) {
 		struct dc_stream_state *stream = dm_state->context->streams[i];
@@ -1316,9 +1336,9 @@ bool pre_validate_dsc(struct drm_atomic_state *state,
 	if (ret != 0)
 		goto clean_exit;
 
-	if (!pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars)) {
+	ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars);
+	if (ret != 0) {
 		DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n");
-		ret = -EINVAL;
 		goto clean_exit;
 	}
 
@@ -1349,7 +1369,7 @@ clean_exit:
 
 	kfree(local_dc_state);
 
-	return (ret == 0);
+	return ret;
 }
 
 static unsigned int kbps_from_pbn(unsigned int pbn)
@@ -1392,6 +1412,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
 	unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
 	unsigned int max_compressed_bw_in_kbps = 0;
 	struct dc_dsc_bw_range bw_range = {0};
+	struct drm_dp_mst_topology_mgr *mst_mgr;
 
 	/*
 	 * check if the mode could be supported if DSC pass-through is supported
@@ -1400,7 +1421,8 @@ enum dc_status dm_dp_mst_is_port_support_mode(
 	 */
 	if (is_dsc_common_config_possible(stream, &bw_range) &&
 	    aconnector->port->passthrough_aux) {
-		mutex_lock(&aconnector->mst_mgr.lock);
+		mst_mgr = aconnector->port->mgr;
+		mutex_lock(&mst_mgr->lock);
 
 		cur_link_settings = stream->link->verified_link_cap;
 
@@ -1413,7 +1435,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
 		end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps,
 					    down_link_bw_in_kbps);
 
-		mutex_unlock(&aconnector->mst_mgr.lock);
+		mutex_unlock(&mst_mgr->lock);
 
 		/*
 		 * use the maximum dsc compression bandwidth as the required
@@ -53,15 +53,15 @@ struct dsc_mst_fairness_vars {
 	struct amdgpu_dm_connector *aconnector;
 };
 
-bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
-				       struct dc_state *dc_state,
-				       struct dsc_mst_fairness_vars *vars);
+int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
+				      struct dc_state *dc_state,
+				      struct dsc_mst_fairness_vars *vars);
 
 bool needs_dsc_aux_workaround(struct dc_link *link);
 
-bool pre_validate_dsc(struct drm_atomic_state *state,
-		      struct dm_atomic_state **dm_state_ptr,
-		      struct dsc_mst_fairness_vars *vars);
+int pre_validate_dsc(struct drm_atomic_state *state,
+		     struct dm_atomic_state **dm_state_ptr,
+		     struct dsc_mst_fairness_vars *vars);
 
 enum dc_status dm_dp_mst_is_port_support_mode(
 	struct amdgpu_dm_connector *aconnector,
@@ -123,9 +123,10 @@ static int dcn314_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
 	uint32_t result;
 
 	result = dcn314_smu_wait_for_response(clk_mgr, 10, 200000);
-	ASSERT(result == VBIOSSMC_Result_OK);
 
-	smu_print("SMU response after wait: %d\n", result);
+	if (result != VBIOSSMC_Result_OK)
+		smu_print("SMU Response was not OK. SMU response after wait received is: %d\n",
+			  result);
 
 	if (result == VBIOSSMC_Status_BUSY)
 		return -1;
@@ -216,6 +217,12 @@ int dcn314_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
 			VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
 			khz_to_mhz_ceil(requested_dcfclk_khz));
 
+#ifdef DBG
+	smu_print("actual_dcfclk_set_mhz %d is set to : %d\n",
+		  actual_dcfclk_set_mhz,
+		  actual_dcfclk_set_mhz * 1000);
+#endif
+
 	return actual_dcfclk_set_mhz * 1000;
 }
 
@@ -359,7 +359,8 @@ static const struct dce_audio_registers audio_regs[] = {
 	audio_regs(2),
 	audio_regs(3),
 	audio_regs(4),
-	audio_regs(5)
+	audio_regs(5),
+	audio_regs(6),
 };
 
 #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
@@ -436,34 +436,48 @@ void dpp1_set_cursor_position(
 		uint32_t height)
 {
 	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
-	int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
-	int src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
+	int x_pos = pos->x - param->viewport.x;
+	int y_pos = pos->y - param->viewport.y;
+	int x_hotspot = pos->x_hotspot;
+	int y_hotspot = pos->y_hotspot;
+	int src_x_offset = x_pos - pos->x_hotspot;
+	int src_y_offset = y_pos - pos->y_hotspot;
+	int cursor_height = (int)height;
+	int cursor_width = (int)width;
 	uint32_t cur_en = pos->enable ? 1 : 0;
 
-	// Cursor width/height and hotspots need to be rotated for offset calculation
+	// Transform cursor width / height and hotspots for offset calculations
 	if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
-		swap(width, height);
+		swap(cursor_height, cursor_width);
+		swap(x_hotspot, y_hotspot);
+
 		if (param->rotation == ROTATION_ANGLE_90) {
-			src_x_offset = pos->x - pos->y_hotspot - param->viewport.x;
-			src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
+			// hotspot = (-y, x)
+			src_x_offset = x_pos - (cursor_width - x_hotspot);
+			src_y_offset = y_pos - y_hotspot;
+		} else if (param->rotation == ROTATION_ANGLE_270) {
+			// hotspot = (y, -x)
+			src_x_offset = x_pos - x_hotspot;
+			src_y_offset = y_pos - (cursor_height - y_hotspot);
 		}
 	} else if (param->rotation == ROTATION_ANGLE_180) {
+		// hotspot = (-x, -y)
 		if (!param->mirror)
-			src_x_offset = pos->x - param->viewport.x;
+			src_x_offset = x_pos - (cursor_width - x_hotspot);
 
-		src_y_offset = pos->y - param->viewport.y;
+		src_y_offset = y_pos - (cursor_height - y_hotspot);
 	}
 
 	if (src_x_offset >= (int)param->viewport.width)
 		cur_en = 0;  /* not visible beyond right edge*/
 
-	if (src_x_offset + (int)width <= 0)
+	if (src_x_offset + cursor_width <= 0)
 		cur_en = 0;  /* not visible beyond left edge*/
 
 	if (src_y_offset >= (int)param->viewport.height)
 		cur_en = 0;  /* not visible beyond bottom edge*/
 
-	if (src_y_offset + (int)height <= 0)
+	if (src_y_offset + cursor_height <= 0)
 		cur_en = 0;  /* not visible beyond top edge*/
 
 	REG_UPDATE(CURSOR0_CONTROL,
@@ -1179,10 +1179,12 @@ void hubp1_cursor_set_position(
 		const struct dc_cursor_mi_param *param)
 {
 	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
-	int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
-	int src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
+	int x_pos = pos->x - param->viewport.x;
+	int y_pos = pos->y - param->viewport.y;
 	int x_hotspot = pos->x_hotspot;
 	int y_hotspot = pos->y_hotspot;
+	int src_x_offset = x_pos - pos->x_hotspot;
+	int src_y_offset = y_pos - pos->y_hotspot;
 	int cursor_height = (int)hubp->curs_attr.height;
 	int cursor_width = (int)hubp->curs_attr.width;
 	uint32_t dst_x_offset;
@@ -1200,18 +1202,26 @@ void hubp1_cursor_set_position(
 	if (hubp->curs_attr.address.quad_part == 0)
 		return;
 
-	// Rotated cursor width/height and hotspots tweaks for offset calculation
+	// Transform cursor width / height and hotspots for offset calculations
 	if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
 		swap(cursor_height, cursor_width);
+		swap(x_hotspot, y_hotspot);
+
 		if (param->rotation == ROTATION_ANGLE_90) {
-			src_x_offset = pos->x - pos->y_hotspot - param->viewport.x;
-			src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
+			// hotspot = (-y, x)
+			src_x_offset = x_pos - (cursor_width - x_hotspot);
+			src_y_offset = y_pos - y_hotspot;
+		} else if (param->rotation == ROTATION_ANGLE_270) {
+			// hotspot = (y, -x)
+			src_x_offset = x_pos - x_hotspot;
+			src_y_offset = y_pos - (cursor_height - y_hotspot);
 		}
 	} else if (param->rotation == ROTATION_ANGLE_180) {
+		// hotspot = (-x, -y)
 		if (!param->mirror)
-			src_x_offset = pos->x - param->viewport.x;
+			src_x_offset = x_pos - (cursor_width - x_hotspot);
 
-		src_y_offset = pos->y - param->viewport.y;
+		src_y_offset = y_pos - (cursor_height - y_hotspot);
 	}
 
 	dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
@@ -1248,8 +1258,8 @@ void hubp1_cursor_set_position(
 			CURSOR_Y_POSITION, pos->y);
 
 	REG_SET_2(CURSOR_HOT_SPOT, 0,
-			CURSOR_HOT_SPOT_X, x_hotspot,
-			CURSOR_HOT_SPOT_Y, y_hotspot);
+			CURSOR_HOT_SPOT_X, pos->x_hotspot,
+			CURSOR_HOT_SPOT_Y, pos->y_hotspot);
 
 	REG_SET(CURSOR_DST_OFFSET, 0,
 			CURSOR_DST_X_OFFSET, dst_x_offset);
@@ -973,10 +973,12 @@ void hubp2_cursor_set_position(
 		const struct dc_cursor_mi_param *param)
 {
 	struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
-	int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
-	int src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
+	int x_pos = pos->x - param->viewport.x;
+	int y_pos = pos->y - param->viewport.y;
 	int x_hotspot = pos->x_hotspot;
 	int y_hotspot = pos->y_hotspot;
+	int src_x_offset = x_pos - pos->x_hotspot;
+	int src_y_offset = y_pos - pos->y_hotspot;
 	int cursor_height = (int)hubp->curs_attr.height;
 	int cursor_width = (int)hubp->curs_attr.width;
 	uint32_t dst_x_offset;
@@ -994,18 +996,26 @@ void hubp2_cursor_set_position(
 	if (hubp->curs_attr.address.quad_part == 0)
 		return;
 
-	// Rotated cursor width/height and hotspots tweaks for offset calculation
+	// Transform cursor width / height and hotspots for offset calculations
 	if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
 		swap(cursor_height, cursor_width);
+		swap(x_hotspot, y_hotspot);
+
 		if (param->rotation == ROTATION_ANGLE_90) {
-			src_x_offset = pos->x - pos->y_hotspot - param->viewport.x;
-			src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
+			// hotspot = (-y, x)
+			src_x_offset = x_pos - (cursor_width - x_hotspot);
+			src_y_offset = y_pos - y_hotspot;
+		} else if (param->rotation == ROTATION_ANGLE_270) {
+			// hotspot = (y, -x)
+			src_x_offset = x_pos - x_hotspot;
+			src_y_offset = y_pos - (cursor_height - y_hotspot);
 		}
 	} else if (param->rotation == ROTATION_ANGLE_180) {
+		// hotspot = (-x, -y)
 		if (!param->mirror)
-			src_x_offset = pos->x - param->viewport.x;
+			src_x_offset = x_pos - (cursor_width - x_hotspot);
 
-		src_y_offset = pos->y - param->viewport.y;
+		src_y_offset = y_pos - (cursor_height - y_hotspot);
 	}
 
 	dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
@@ -1042,8 +1052,8 @@ void hubp2_cursor_set_position(
 			CURSOR_Y_POSITION, pos->y);
 
 	REG_SET_2(CURSOR_HOT_SPOT, 0,
-			CURSOR_HOT_SPOT_X, x_hotspot,
-			CURSOR_HOT_SPOT_Y, y_hotspot);
+			CURSOR_HOT_SPOT_X, pos->x_hotspot,
+			CURSOR_HOT_SPOT_Y, pos->y_hotspot);
 
 	REG_SET(CURSOR_DST_OFFSET, 0,
 			CURSOR_DST_X_OFFSET, dst_x_offset);
@@ -1052,8 +1062,8 @@ void hubp2_cursor_set_position(
 	hubp->pos.cur_ctl.bits.cur_enable = cur_en;
 	hubp->pos.position.bits.x_pos = pos->x;
 	hubp->pos.position.bits.y_pos = pos->y;
-	hubp->pos.hot_spot.bits.x_hot = x_hotspot;
-	hubp->pos.hot_spot.bits.y_hot = y_hotspot;
+	hubp->pos.hot_spot.bits.x_hot = pos->x_hotspot;
+	hubp->pos.hot_spot.bits.y_hot = pos->y_hotspot;
 	hubp->pos.dst_offset.bits.dst_x_offset = dst_x_offset;
 	/* Cursor Rectangle Cache
 	 * Cursor bitmaps have different hotspot values
@@ -96,6 +96,13 @@ static void dccg314_set_pixel_rate_div(
 	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
 	enum pixel_rate_div cur_k1 = PIXEL_RATE_DIV_NA, cur_k2 = PIXEL_RATE_DIV_NA;
 
+	// Don't program 0xF into the register field. Not valid since
+	// K1 / K2 field is only 1 / 2 bits wide
+	if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
 	dccg314_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2);
 	if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA || (k1 == cur_k1 && k2 == cur_k2))
 		return;
@@ -348,10 +348,8 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
 	two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
 	odm_combine_factor = get_odm_config(pipe_ctx, NULL);
 
-	if (pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
-		return odm_combine_factor;
-
 	if (is_dp_128b_132b_signal(pipe_ctx)) {
 		*k1_div = PIXEL_RATE_DIV_BY_1;
 		*k2_div = PIXEL_RATE_DIV_BY_1;
 	} else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) {
 		*k1_div = PIXEL_RATE_DIV_BY_1;
@@ -359,7 +357,7 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
 			*k2_div = PIXEL_RATE_DIV_BY_2;
 		else
 			*k2_div = PIXEL_RATE_DIV_BY_4;
-	} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
+	} else if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) {
 		if (two_pix_per_container) {
 			*k1_div = PIXEL_RATE_DIV_BY_1;
 			*k2_div = PIXEL_RATE_DIV_BY_2;
@@ -96,8 +96,10 @@ static void dccg32_set_pixel_rate_div(
 
 	// Don't program 0xF into the register field. Not valid since
 	// K1 / K2 field is only 1 / 2 bits wide
-	if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA)
+	if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA) {
+		BREAK_TO_DEBUGGER();
 		return;
+	}
 
 	dccg32_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2);
 	if (k1 == cur_k1 && k2 == cur_k2)
@@ -1171,10 +1171,8 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
 	two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
 	odm_combine_factor = get_odm_config(pipe_ctx, NULL);

-	if (pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
-		return odm_combine_factor;
-
 	if (is_dp_128b_132b_signal(pipe_ctx)) {
 		*k1_div = PIXEL_RATE_DIV_BY_1;
 		*k2_div = PIXEL_RATE_DIV_BY_1;
 	} else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) {
 		*k1_div = PIXEL_RATE_DIV_BY_1;
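With the SIGNAL_TYPE_VIRTUAL early return dropped in both the dcn314 and dcn32 helpers above, a virtual stream falls through to the DP branch and receives concrete K1/K2 dividers, so the set_pixel_rate_div guard no longer trips on N/A for headless streams. A sketch of the selection shape after the change; the branch layout mirrors the hunks, but the divider values in branches the hunks do not show are illustrative only:

    #include <stdio.h>

    enum pixel_rate_div { DIV_BY_1, DIV_BY_2, DIV_BY_4, DIV_NA = 0xF };
    enum signal_type { SIG_DP, SIG_VIRTUAL, SIG_HDMI_TMDS };

    /* virtual now shares the DP branch instead of returning early with
     * the dividers still at their N/A defaults */
    static void pick_k1_k2(enum signal_type sig, int two_pix_per_container,
                           enum pixel_rate_div *k1, enum pixel_rate_div *k2)
    {
            *k1 = DIV_NA;
            *k2 = DIV_NA;

            if (sig == SIG_HDMI_TMDS) {
                    *k1 = DIV_BY_1;
                    *k2 = DIV_BY_2; /* pixel-clock dependent in the driver */
            } else if (sig == SIG_DP || sig == SIG_VIRTUAL) {
                    *k1 = DIV_BY_1;
                    *k2 = two_pix_per_container ? DIV_BY_2 : DIV_BY_4;
            }
    }

    int main(void)
    {
            enum pixel_rate_div k1, k2;

            pick_k1_k2(SIG_VIRTUAL, 0, &k1, &k2);
            printf("k1=%d k2=%d\n", k1, k2); /* no longer N/A */
            return 0;
    }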
@@ -111,7 +111,7 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_stat
 			mall_alloc_width_blk_aligned = full_vp_width_blk_aligned;

 			/* mall_alloc_height_blk_aligned_l/c = CEILING(sub_vp_height_l/c - 1, blk_height_l/c) + blk_height_l/c */
-			mall_alloc_height_blk_aligned = (pipe->stream->timing.v_addressable - 1 + mblk_height - 1) /
+			mall_alloc_height_blk_aligned = (pipe->plane_res.scl_data.viewport.height - 1 + mblk_height - 1) /
 					mblk_height * mblk_height + mblk_height;

 			/* full_mblk_width_ub_l/c = mall_alloc_width_blk_aligned_l/c;
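The CEILING comment maps onto the usual integer round-up idiom, (n + d - 1) / d scaled back up by d, plus one extra block. A standalone check of the formula with made-up numbers:

    #include <stdio.h>

    /* CEILING(h - 1, blk) + blk in integer math, as in the driver: round
     * (h - 1) up to a multiple of blk, then add one extra block */
    static unsigned int mall_alloc_height(unsigned int viewport_height,
                                          unsigned int mblk_height)
    {
            return (viewport_height - 1 + mblk_height - 1) / mblk_height
                    * mblk_height + mblk_height;
    }

    int main(void)
    {
            /* a 1080-line viewport with 64-line blocks:
             * (1079 + 63) / 64 = 17 blocks -> 1088, plus one -> 1152 */
            printf("%u\n", mall_alloc_height(1080, 64));
            return 0;
    }

The substance of the fix is the operand: v_addressable is the full timing height, while a sub-viewport pipe only occupies the scaled viewport height, so sizing the MALL allocation from the timing over-counted the cache ways needed.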
@@ -157,7 +157,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
 	.dispclk_dppclk_vco_speed_mhz = 4300.0,
 	.do_urgent_latency_adjustment = true,
 	.urgent_latency_adjustment_fabric_clock_component_us = 1.0,
-	.urgent_latency_adjustment_fabric_clock_reference_mhz = 1000,
+	.urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
 };

 void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
@@ -211,7 +211,7 @@ void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
 	/* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */
 	if (clk_mgr->base.ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) {
 		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].valid = true;
-		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 38;
+		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 50;
 		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.fclk_change_latency_us = fclk_change_latency_us;
 		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us = sr_exit_time_us;
 		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
@@ -221,7 +221,7 @@ void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
 		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_uclk = min_uclk_mhz;
 		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF;
 		clk_mgr->base.bw_params->dummy_pstate_table[0].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[0].memclk_mhz * 16;
-		clk_mgr->base.bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 38;
+		clk_mgr->base.bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 50;
 		clk_mgr->base.bw_params->dummy_pstate_table[1].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[1].memclk_mhz * 16;
 		clk_mgr->base.bw_params->dummy_pstate_table[1].dummy_pstate_latency_us = 9;
 		clk_mgr->base.bw_params->dummy_pstate_table[2].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[2].memclk_mhz * 16;
@@ -1910,7 +1910,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,

 	if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] ==
 			dm_dram_clock_change_unsupported) {
-		int min_dram_speed_mts_offset = dc->clk_mgr->bw_params->clk_table.num_entries - 1;
+		int min_dram_speed_mts_offset = dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1;

 		min_dram_speed_mts =
 			dc->clk_mgr->bw_params->clk_table.entries[min_dram_speed_mts_offset].memclk_mhz * 16;
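The replacement field reflects that the DCN3.2 clock table keeps per-clock level counts alongside the overall entry count, and only the memclk count bounds the valid UCLK rows. The reduction below is a guessed layout for illustration; only the two field names are taken from the hunk:

    #include <stdio.h>

    #define MAX_NUM_DPM_LVL 8

    /* guessed reduction of the clk_mgr bandwidth parameters */
    struct clk_table_sketch {
            unsigned int num_entries;               /* rows populated overall */
            struct {
                    unsigned int num_memclk_levels; /* rows valid for memclk */
            } num_entries_per_clk;
            struct { unsigned int memclk_mhz; } entries[MAX_NUM_DPM_LVL];
    };

    int main(void)
    {
            struct clk_table_sketch t = {
                    .num_entries = 4, /* e.g. another clock has 4 levels */
                    .num_entries_per_clk = { .num_memclk_levels = 3 },
                    .entries = { {400}, {800}, {1000}, {0} },
            };
            /* old code used t.num_entries - 1 == 3, an unpopulated row */
            int off = t.num_entries_per_clk.num_memclk_levels - 1;

            printf("min_dram_speed_mts = %u\n", t.entries[off].memclk_mhz * 16);
            return 0;
    }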
@@ -126,9 +126,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = {
 	.sr_enter_plus_exit_z8_time_us = 320,
 	.writeback_latency_us = 12.0,
 	.round_trip_ping_latency_dcfclk_cycles = 263,
-	.urgent_latency_pixel_data_only_us = 9.35,
-	.urgent_latency_pixel_mixed_with_vm_data_us = 9.35,
-	.urgent_latency_vm_data_only_us = 9.35,
+	.urgent_latency_pixel_data_only_us = 4,
+	.urgent_latency_pixel_mixed_with_vm_data_us = 4,
+	.urgent_latency_vm_data_only_us = 4,
 	.fclk_change_latency_us = 20,
 	.usr_retraining_latency_us = 2,
 	.smn_latency_us = 2,
@@ -156,7 +156,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = {
 	.dispclk_dppclk_vco_speed_mhz = 4300.0,
 	.do_urgent_latency_adjustment = true,
 	.urgent_latency_adjustment_fabric_clock_component_us = 1.0,
-	.urgent_latency_adjustment_fabric_clock_reference_mhz = 1000,
+	.urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
 };

 static void get_optimal_ntuple(struct _vcs_dpi_voltage_scaling_st *entry)
@@ -25,10 +25,10 @@

 // *** IMPORTANT ***
 // PMFW TEAM: Always increment the interface version on any change to this file
-#define SMU13_DRIVER_IF_VERSION 0x2C
+#define SMU13_DRIVER_IF_VERSION 0x35

 //Increment this version if SkuTable_t or BoardTable_t change
-#define PPTABLE_VERSION 0x20
+#define PPTABLE_VERSION 0x27

 #define NUM_GFXCLK_DPM_LEVELS 16
 #define NUM_SOCCLK_DPM_LEVELS 8
@@ -96,7 +96,7 @@
 #define FEATURE_MEM_TEMP_READ_BIT 47
 #define FEATURE_ATHUB_MMHUB_PG_BIT 48
 #define FEATURE_SOC_PCC_BIT 49
-#define FEATURE_SPARE_50_BIT 50
+#define FEATURE_EDC_PWRBRK_BIT 50
 #define FEATURE_SPARE_51_BIT 51
 #define FEATURE_SPARE_52_BIT 52
 #define FEATURE_SPARE_53_BIT 53
@@ -282,15 +282,15 @@ typedef enum {
 } I2cControllerPort_e;

 typedef enum {
-	I2C_CONTROLLER_NAME_VR_GFX = 0,
-	I2C_CONTROLLER_NAME_VR_SOC,
-	I2C_CONTROLLER_NAME_VR_VMEMP,
-	I2C_CONTROLLER_NAME_VR_VDDIO,
-	I2C_CONTROLLER_NAME_LIQUID0,
-	I2C_CONTROLLER_NAME_LIQUID1,
-	I2C_CONTROLLER_NAME_PLX,
-	I2C_CONTROLLER_NAME_OTHER,
-	I2C_CONTROLLER_NAME_COUNT,
+	I2C_CONTROLLER_NAME_VR_GFX = 0,
+	I2C_CONTROLLER_NAME_VR_SOC,
+	I2C_CONTROLLER_NAME_VR_VMEMP,
+	I2C_CONTROLLER_NAME_VR_VDDIO,
+	I2C_CONTROLLER_NAME_LIQUID0,
+	I2C_CONTROLLER_NAME_LIQUID1,
+	I2C_CONTROLLER_NAME_PLX,
+	I2C_CONTROLLER_NAME_FAN_INTAKE,
+	I2C_CONTROLLER_NAME_COUNT,
 } I2cControllerName_e;

 typedef enum {
@@ -302,6 +302,7 @@
 	I2C_CONTROLLER_THROTTLER_LIQUID0,
 	I2C_CONTROLLER_THROTTLER_LIQUID1,
 	I2C_CONTROLLER_THROTTLER_PLX,
+	I2C_CONTROLLER_THROTTLER_FAN_INTAKE,
 	I2C_CONTROLLER_THROTTLER_INA3221,
 	I2C_CONTROLLER_THROTTLER_COUNT,
 } I2cControllerThrottler_e;
@@ -309,8 +310,9 @@
 typedef enum {
 	I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5,
 	I2C_CONTROLLER_PROTOCOL_VR_IR35217,
-	I2C_CONTROLLER_PROTOCOL_TMP_TMP102A,
+	I2C_CONTROLLER_PROTOCOL_TMP_MAX31875,
+	I2C_CONTROLLER_PROTOCOL_INA3221,
 	I2C_CONTROLLER_PROTOCOL_TMP_MAX6604,
 	I2C_CONTROLLER_PROTOCOL_COUNT,
 } I2cControllerProtocol_e;
@@ -690,6 +692,9 @@ typedef struct {
 #define PP_OD_FEATURE_UCLK_BIT 8
 #define PP_OD_FEATURE_ZERO_FAN_BIT 9
 #define PP_OD_FEATURE_TEMPERATURE_BIT 10
+#define PP_OD_FEATURE_POWER_FEATURE_CTRL_BIT 11
+#define PP_OD_FEATURE_ASIC_TDC_BIT 12
 #define PP_OD_FEATURE_COUNT 13

 typedef enum {
 	PP_OD_POWER_FEATURE_ALWAYS_ENABLED,
@@ -697,6 +702,11 @@
 	PP_OD_POWER_FEATURE_ALWAYS_DISABLED,
 } PP_OD_POWER_FEATURE_e;

+typedef enum {
+	FAN_MODE_AUTO = 0,
+	FAN_MODE_MANUAL_LINEAR,
+} FanMode_e;
+
 typedef struct {
 	uint32_t FeatureCtrlMask;
@@ -708,8 +718,8 @@ typedef struct {
 	uint8_t RuntimePwrSavingFeaturesCtrl;

 	//Frequency changes
-	int16_t GfxclkFmin; // MHz
-	int16_t GfxclkFmax; // MHz
+	int16_t GfxclkFmin; // MHz
+	int16_t GfxclkFmax; // MHz
 	uint16_t UclkFmin; // MHz
 	uint16_t UclkFmax; // MHz
@@ -730,7 +740,12 @@ typedef struct {
 	uint8_t MaxOpTemp;
 	uint8_t Padding[4];

-	uint32_t Spare[12];
+	uint16_t GfxVoltageFullCtrlMode;
+	uint16_t GfxclkFullCtrlMode;
+	uint16_t UclkFullCtrlMode;
+	int16_t AsicTdc;
+
+	uint32_t Spare[10];
 	uint32_t MmHubPadding[8]; // SMU internal use. Adding here instead of external as a workaround
 } OverDriveTable_t;
@@ -748,8 +763,8 @@ typedef struct {
 	uint8_t IdlePwrSavingFeaturesCtrl;
 	uint8_t RuntimePwrSavingFeaturesCtrl;

-	uint16_t GfxclkFmin; // MHz
-	uint16_t GfxclkFmax; // MHz
+	int16_t GfxclkFmin; // MHz
+	int16_t GfxclkFmax; // MHz
 	uint16_t UclkFmin; // MHz
 	uint16_t UclkFmax; // MHz
@@ -769,7 +784,12 @@ typedef struct {
 	uint8_t MaxOpTemp;
 	uint8_t Padding[4];

-	uint32_t Spare[12];
+	uint16_t GfxVoltageFullCtrlMode;
+	uint16_t GfxclkFullCtrlMode;
+	uint16_t UclkFullCtrlMode;
+	int16_t AsicTdc;
+
+	uint32_t Spare[10];

 } OverDriveLimits_t;
@@ -903,7 +923,8 @@ typedef struct {
 	uint16_t FanStartTempMin;
 	uint16_t FanStartTempMax;

-	uint32_t Spare[12];
+	uint16_t PowerMinPpt0[POWER_SOURCE_COUNT];
+	uint32_t Spare[11];

 } MsgLimits_t;
@@ -1086,11 +1107,13 @@ typedef struct {
 	uint32_t GfxoffSpare[15];

 	// GFX GPO
-	float DfllBtcMasterScalerM;
+	uint32_t DfllBtcMasterScalerM;
 	int32_t DfllBtcMasterScalerB;
-	float DfllBtcSlaveScalerM;
+	uint32_t DfllBtcSlaveScalerM;
 	int32_t DfllBtcSlaveScalerB;
-	uint32_t GfxGpoSpare[12];
+	uint32_t DfllPccAsWaitCtrl; //GDFLL_AS_WAIT_CTRL_PCC register value to be passed to RLC msg
+	uint32_t DfllPccAsStepCtrl; //GDFLL_AS_STEP_CTRL_PCC register value to be passed to RLC msg
+	uint32_t GfxGpoSpare[10];

 	// GFX DCS
@@ -1106,7 +1129,10 @@ typedef struct {
 	uint16_t DcsTimeout; //This is the amount of time SMU FW waits for RLC to put GFX into GFXOFF before reverting to the fallback mechanism of throttling GFXCLK to Fmin.


-	uint32_t DcsSpare[16];
+	uint32_t DcsSpare[14];
+
+	// UCLK section
+	uint16_t ShadowFreqTableUclk[NUM_UCLK_DPM_LEVELS]; // In MHz

 	// UCLK section
 	uint8_t UseStrobeModeOptimizations; //Set to indicate that FW should use strobe mode optimizations
@@ -1163,13 +1189,14 @@ typedef struct {
 	uint16_t IntakeTempHighIntakeAcousticLimit;
 	uint16_t IntakeTempAcouticLimitReleaseRate;

-	uint16_t FanStalledTempLimitOffset;
+	int16_t FanAbnormalTempLimitOffset;
 	uint16_t FanStalledTriggerRpm;
-	uint16_t FanAbnormalTriggerRpm;
-	uint16_t FanPadding;
-
-	uint32_t FanSpare[14];
+	uint16_t FanAbnormalTriggerRpmCoeff;
+	uint16_t FanAbnormalDetectionEnable;
+
+	uint8_t FanIntakeSensorSupport;
+	uint8_t FanIntakePadding[3];
+	uint32_t FanSpare[13];
 	// SECTION: VDD_GFX AVFS

 	uint8_t OverrideGfxAvfsFuses;
@@ -1193,7 +1220,6 @@ typedef struct {
 	uint32_t dGbV_dT_vmin;
 	uint32_t dGbV_dT_vmax;

-	//Unused: PMFW-9370
 	uint32_t V2F_vmin_range_low;
 	uint32_t V2F_vmin_range_high;
 	uint32_t V2F_vmax_range_low;
@@ -1238,8 +1264,21 @@ typedef struct {
 	// SECTION: Advanced Options
 	uint32_t DebugOverrides;

+	// Section: Total Board Power idle vs active coefficients
+	uint8_t TotalBoardPowerSupport;
+	uint8_t TotalBoardPowerPadding[3];
+
+	int16_t TotalIdleBoardPowerM;
+	int16_t TotalIdleBoardPowerB;
+	int16_t TotalBoardPowerM;
+	int16_t TotalBoardPowerB;
+
+	QuadraticInt_t qFeffCoeffGameClock[POWER_SOURCE_COUNT];
+	QuadraticInt_t qFeffCoeffBaseClock[POWER_SOURCE_COUNT];
+	QuadraticInt_t qFeffCoeffBoostClock[POWER_SOURCE_COUNT];
+
 	// SECTION: Sku Reserved
-	uint32_t Spare[64];
+	uint32_t Spare[43];

 	// Padding for MMHUB - do not modify this
 	uint32_t MmHubPadding[8];
@@ -1304,7 +1343,8 @@ typedef struct {
 	// SECTION: Clock Spread Spectrum

 	// UCLK Spread Spectrum
-	uint16_t UclkSpreadPadding;
+	uint8_t UclkTrainingModeSpreadPercent; // Q4.4
+	uint8_t UclkSpreadPadding;
 	uint16_t UclkSpreadFreq; // kHz

 	// UCLK Spread Spectrum
@@ -1317,11 +1357,7 @@ typedef struct {
 	// Section: Memory Config
 	uint8_t DramWidth; // Width of interface to the channel for each DRAM module. See DRAM_BIT_WIDTH_TYPE_e
-	uint8_t PaddingMem1[3];
-
-	// Section: Total Board Power
-	uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power
-	uint16_t BoardPowerPadding;
+	uint8_t PaddingMem1[7];

 	// SECTION: UMC feature flags
 	uint8_t HsrEnabled;
@@ -1423,8 +1459,11 @@ typedef struct {
 	uint16_t Vcn1ActivityPercentage ;

 	uint32_t EnergyAccumulator;
-	uint16_t AverageSocketPower ;
+	uint16_t AverageSocketPower;
+	uint16_t AverageTotalBoardPower;

 	uint16_t AvgTemperature[TEMP_COUNT];
+	uint16_t AvgTemperatureFanIntake;

 	uint8_t PcieRate ;
 	uint8_t PcieWidth ;
@@ -1592,5 +1631,7 @@ typedef struct {
 #define IH_INTERRUPT_CONTEXT_ID_AUDIO_D0 0x5
 #define IH_INTERRUPT_CONTEXT_ID_AUDIO_D3 0x6
 #define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
+#define IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
+#define IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9

 #endif
@@ -31,7 +31,7 @@
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x35
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_10 0x1D

 #define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
@@ -5186,7 +5186,7 @@ int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm
 	mst_state = drm_atomic_get_mst_topology_state(state, mgr);

 	if (IS_ERR(mst_state))
-		return -EINVAL;
+		return PTR_ERR(mst_state);

 	list_for_each_entry(pos, &mst_state->payloads, next) {
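drm_atomic_get_mst_topology_state() returns an ERR_PTR-encoded pointer, and the point of the DSC series is to hand the encoded errno (notably -EDEADLK from atomic lock contention, which tells the caller to back off and retry) up the stack instead of flattening it to -EINVAL. A userspace sketch of the encoding and why propagation matters; MAX_ERRNO and the helpers imitate the kernel's, the rest is made up:

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* imitation of the kernel's ERR_PTR encoding: small negative errnos
     * live at the very top of the address space */
    static void *ERR_PTR(long error) { return (void *)error; }
    static long PTR_ERR(const void *ptr) { return (long)ptr; }
    static int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static int dummy_state;

    static void *get_topology_state(int contended)
    {
            if (contended)
                    return ERR_PTR(-EDEADLK); /* caller must back off, retry */
            return &dummy_state;
    }

    int main(void)
    {
            void *state = get_topology_state(1);

            if (IS_ERR(state)) {
                    /* propagating PTR_ERR(state) keeps -EDEADLK visible;
                     * a blanket -EINVAL would defeat the retry logic */
                    printf("err=%ld\n", PTR_ERR(state));
            }
            return 0;
    }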
@@ -2434,7 +2434,7 @@ intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port)
 {
 	const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);

-	if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_io == POWER_DOMAIN_INVALID)
+	if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID))
 		return POWER_DOMAIN_PORT_DDI_IO_A;

 	return domains->ddi_io + (int)(port - domains->port_start);
@@ -2445,7 +2445,7 @@ intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port po
 {
 	const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);

-	if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_lanes == POWER_DOMAIN_INVALID)
+	if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID))
 		return POWER_DOMAIN_PORT_DDI_LANES_A;

 	return domains->ddi_lanes + (int)(port - domains->port_start);
@@ -2471,7 +2471,7 @@ intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch
 {
 	const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);

-	if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID)
+	if (drm_WARN_ON(&i915->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID))
 		return POWER_DOMAIN_AUX_A;

 	return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start);
@@ -2482,7 +2482,7 @@ intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch au
 {
 	const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);

-	if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_tbt == POWER_DOMAIN_INVALID)
+	if (drm_WARN_ON(&i915->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID))
 		return POWER_DOMAIN_AUX_TBT1;

 	return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start);
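drm_WARN_ON(), like WARN_ON(), evaluates to the truth value of its condition, so folding the POWER_DOMAIN_INVALID test inside it means both failure modes now leave a backtrace before the fallback domain is returned; previously an invalid domain fell back silently. A userspace imitation of that behaviour, using a GNU C statement expression as the kernel macro does; all other names are stand-ins:

    #include <stdio.h>

    /* stand-in for drm_WARN_ON: prints a warning and, like the kernel
     * macro, evaluates to the truth value of its condition */
    #define WARN_ON_SKETCH(cond) ({                            \
            int __c = !!(cond);                                \
            if (__c)                                           \
                    fprintf(stderr, "WARN at %s:%d\n",         \
                            __FILE__, __LINE__);               \
            __c;                                               \
    })

    enum power_domain { POWER_DOMAIN_INVALID = -1, POWER_DOMAIN_PORT_A = 0 };

    struct port_domains { enum power_domain ddi_io; };

    static enum power_domain ddi_io_domain(const struct port_domains *domains)
    {
            /* both failure modes now warn before the fallback is used */
            if (WARN_ON_SKETCH(!domains ||
                               domains->ddi_io == POWER_DOMAIN_INVALID))
                    return POWER_DOMAIN_PORT_A;
            return domains->ddi_io;
    }

    int main(void)
    {
            struct port_domains d = { .ddi_io = POWER_DOMAIN_INVALID };

            ddi_io_domain(&d);   /* warns instead of silently falling back */
            ddi_io_domain(NULL); /* still warns, as before */
            return 0;
    }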
@@ -612,6 +612,10 @@ static int i915_ttm_truncate(struct drm_i915_gem_object *obj)

 	WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED);

+	err = ttm_bo_wait(bo, true, false);
+	if (err)
+		return err;
+
 	err = i915_ttm_move_notify(bo);
 	if (err)
 		return err;
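The added ttm_bo_wait() enforces the never-purge-busy-objects rule: truncation must idle the buffer first and bail out on error rather than drop backing pages under in-flight GPU work. A toy model of the ordering; every name is a stand-in:

    #include <stdio.h>

    struct bo_sketch { int pending_fences; };

    /* stand-in for ttm_bo_wait(): block until outstanding work signals */
    static int bo_wait(struct bo_sketch *bo)
    {
            while (bo->pending_fences > 0)
                    bo->pending_fences--; /* pretend each fence signals */
            return 0;
    }

    static int truncate_backing_store(struct bo_sketch *bo)
    {
            int err = bo_wait(bo);

            if (err)
                    return err; /* busy or interrupted: keep the pages */

            /* only an idle object may lose its backing store */
            printf("purged idle object\n");
            return 0;
    }

    int main(void)
    {
            struct bo_sketch bo = { .pending_fences = 2 };

            return truncate_backing_store(&bo);
    }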
@@ -664,8 +664,6 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
 		return -ESRCH;
 	}

-	kvm_get_kvm(vgpu->vfio_device.kvm);
-
 	if (__kvmgt_vgpu_exist(vgpu))
 		return -EEXIST;

@@ -676,6 +674,7 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)

 	vgpu->track_node.track_write = kvmgt_page_track_write;
 	vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
+	kvm_get_kvm(vgpu->vfio_device.kvm);
 	kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
 					 &vgpu->track_node);

@@ -715,15 +714,14 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)

 	kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
 					   &vgpu->track_node);
-	kvm_put_kvm(vgpu->vfio_device.kvm);
-
 	kvmgt_protect_table_destroy(vgpu);
 	gvt_cache_destroy(vgpu);

 	intel_vgpu_release_msi_eventfd_ctx(vgpu);

 	vgpu->attached = false;
+
+	if (vgpu->vfio_device.kvm)
+		kvm_put_kvm(vgpu->vfio_device.kvm);
 }

 static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
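Both halves of the GVT fix restore the acquire/release pairing: the kvm reference is taken only after the last early-return check, right where the page-track notifier that depends on it is registered, and dropped on close under the same condition it was taken. A toy refcount model of that discipline; the names are stand-ins:

    #include <stdio.h>

    struct kvm_sketch { int refcount; };

    static void kvm_get(struct kvm_sketch *kvm) { kvm->refcount++; }
    static void kvm_put(struct kvm_sketch *kvm) { kvm->refcount--; }

    static int already_attached;

    static int open_device(struct kvm_sketch *kvm)
    {
            if (already_attached)
                    return -1; /* early failure: no reference taken or leaked */

            kvm_get(kvm);         /* paired with the conditional put below */
            already_attached = 1; /* notifier registration stand-in */
            return 0;
    }

    static void close_device(struct kvm_sketch *kvm)
    {
            already_attached = 0;
            if (kvm)              /* drop it under the condition it was taken */
                    kvm_put(kvm);
    }

    int main(void)
    {
            struct kvm_sketch kvm = { .refcount = 0 };

            open_device(&kvm);
            close_device(&kvm);
            printf("refcount=%d\n", kvm.refcount); /* balanced: prints 0 */
            return 0;
    }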
@@ -577,7 +577,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
 		if (scr_readw(r) != vc->vc_video_erase_char)
 			break;
 	if (r != q && new_rows >= rows + logo_lines) {
-		save = kmalloc(array3_size(logo_lines, new_cols, 2),
+		save = kzalloc(array3_size(logo_lines, new_cols, 2),
 			       GFP_KERNEL);
 		if (save) {
 			int i = min(cols, new_cols);
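kzalloc() is kmalloc() plus zero-fill. The saved-logo buffer may be copied back to the screen before every byte has been written, so zeroing it keeps stale heap contents off the console. In userspace terms this is the malloc() versus calloc() distinction:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            size_t logo_lines = 4, new_cols = 80;
            /* array3_size(a, b, c) is a * b * c with overflow checking;
             * calloc() zeroes the buffer, as kzalloc() does and kmalloc()
             * does not */
            unsigned short *save = calloc(logo_lines * new_cols, 2);

            if (!save)
                    return 1;
            /* even if later copies cover only part of the buffer, the rest
             * reads back as zeros rather than stale heap data */
            printf("first cell: %hu\n", save[0]);
            free(save);
            return 0;
    }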