Merge branch 'drm-fixes-4.8' of git://people.freedesktop.org/~agd5f/linux into drm-fixes

radeon and amdgpu fixes for 4.8. Nothing major:
- fix a performance regression due to the LRU changes in 4.7
- 32 bit fixes
- fix a PLL regression
- misc bug fixes

* 'drm-fixes-4.8' of git://people.freedesktop.org/~agd5f/linux:
  drm/amdgpu: skip TV/CV in display parsing
  drm/amdgpu: avoid a possible array overflow
  drm/amdgpu: fix lru size grouping v2
  drm/amdgpu: fix timeout value check in amd_sched_job_recovery
  drm/amdgpu: fix sdma_v2_4_ring_test_ib
  drm/amdgpu: fix amdgpu_move_blit on 32bit systems
  drm/radeon: fix radeon_move_blit on 32bit systems
  drm/radeon: only apply the SS fractional workaround to RS[78]80
commit 179ca3bb2c
@@ -426,6 +426,8 @@ struct amdgpu_mman {
 
 	/* custom LRU management */
 	struct amdgpu_mman_lru		log2_size[AMDGPU_TTM_LRU_SIZE];
+	/* guard for log2_size array, don't add anything in between */
+	struct amdgpu_mman_lru		guard;
 };
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring,
@@ -321,6 +321,19 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
 			    (le16_to_cpu(path->usConnObjectId) &
 			     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
 
+			/* Skip TV/CV support */
+			if ((le16_to_cpu(path->usDeviceTag) ==
+			     ATOM_DEVICE_TV1_SUPPORT) ||
+			    (le16_to_cpu(path->usDeviceTag) ==
+			     ATOM_DEVICE_CV_SUPPORT))
+				continue;
+
+			if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) {
+				DRM_ERROR("invalid con_obj_id %d for device tag 0x%04x\n",
+					  con_obj_id, le16_to_cpu(path->usDeviceTag));
+				continue;
+			}
+
 			connector_type =
 				object_connector_convert[con_obj_id];
 			connector_object_id = con_obj_id;
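
The hunk above both skips TV/CV device tags and bounds-checks the connector index before it is used to look up a table, since the index comes from BIOS-provided object data. As a standalone illustration of that defensive lookup pattern (user-space sketch; the table name and values here are made up, not the kernel's object_connector_convert[]):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* hypothetical lookup table standing in for the connector conversion table */
static const int connector_convert[] = { 0, 1, 2, 3 };

static int lookup_connector(unsigned int obj_id)
{
	/* reject indices the table does not cover instead of reading past it */
	if (obj_id >= ARRAY_SIZE(connector_convert)) {
		fprintf(stderr, "invalid obj_id %u\n", obj_id);
		return -1;
	}
	return connector_convert[obj_id];
}

int main(void)
{
	printf("%d\n", lookup_connector(2));   /* valid index: prints 2 */
	printf("%d\n", lookup_connector(42));  /* rejected: prints -1 */
	return 0;
}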
@@ -251,8 +251,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 
 	adev = amdgpu_get_adev(bo->bdev);
 	ring = adev->mman.buffer_funcs_ring;
-	old_start = old_mem->start << PAGE_SHIFT;
-	new_start = new_mem->start << PAGE_SHIFT;
+	old_start = (u64)old_mem->start << PAGE_SHIFT;
+	new_start = (u64)new_mem->start << PAGE_SHIFT;
 
 	switch (old_mem->mem_type) {
 	case TTM_PL_VRAM:
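
This is a 32-bit truncation fix: old_mem->start is an unsigned long, which is 32 bits wide on a 32-bit kernel, so shifting it left by PAGE_SHIFT overflows for placements at or above 4 GiB before the result is widened to u64. Casting to u64 first performs the shift in 64 bits. The same change appears for radeon_move_blit in the last hunk. A minimal user-space sketch of the difference (PAGE_SHIFT assumed to be 12 for the example):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* a page offset high enough that the byte address needs more than 32 bits */
	uint32_t start = 0x00200000;                    /* page 0x200000 -> 8 GiB */

	uint64_t wrong = start << PAGE_SHIFT;           /* shift done in 32 bits, wraps to 0 */
	uint64_t right = (uint64_t)start << PAGE_SHIFT; /* shift done in 64 bits */

	printf("wrong: 0x%llx\n", (unsigned long long)wrong);
	printf("right: 0x%llx\n", (unsigned long long)right);
	return 0;
}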
@@ -950,6 +950,8 @@ static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo)
 	struct list_head *res = lru->lru[tbo->mem.mem_type];
 
 	lru->lru[tbo->mem.mem_type] = &tbo->lru;
+	while ((++lru)->lru[tbo->mem.mem_type] == res)
+		lru->lru[tbo->mem.mem_type] = &tbo->lru;
 
 	return res;
 }
@@ -960,6 +962,8 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
 	struct list_head *res = lru->swap_lru;
 
 	lru->swap_lru = &tbo->swap;
+	while ((++lru)->swap_lru == res)
+		lru->swap_lru = &tbo->swap;
 
 	return res;
 }
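
Together with the guard entry added to struct amdgpu_mman (and zeroed in amdgpu_ttm_init below), these two hunks restore the size-based LRU grouping: several adjacent amdgpu_mman_lru entries can share the same cached tail pointer, so when the tail moves, the loop propagates the new value to every following entry that still holds the old one and stops at the guard, whose pointers stay NULL and therefore never match. A standalone sketch of that sentinel-terminated propagation (the names here are illustrative, not the kernel structures):

#include <stddef.h>
#include <stdio.h>

#define NUM_SLOTS 4

/* stand-in for amdgpu_mman_lru: each slot caches a tail pointer */
struct lru_slot {
	int *tail;
};

/*
 * Slots 0..NUM_SLOTS-1 are real; the extra entry is the guard whose
 * tail stays NULL, so the propagation loop below always terminates.
 */
static struct lru_slot slots[NUM_SLOTS + 1];

static void update_tail(struct lru_slot *slot, int *new_tail)
{
	int *old = slot->tail;

	slot->tail = new_tail;
	/* push the new tail into every following slot still holding the old one */
	while ((++slot)->tail == old)
		slot->tail = new_tail;
}

int main(void)
{
	int a, b;

	for (int i = 0; i < NUM_SLOTS; i++)
		slots[i].tail = &a;            /* all real slots share one tail */
	slots[NUM_SLOTS].tail = NULL;          /* guard */

	update_tail(&slots[1], &b);            /* slots 1..3 now point at b, slot 0 keeps a */

	for (int i = 0; i <= NUM_SLOTS; i++)
		printf("slot %d: %s\n", i,
		       slots[i].tail == &b ? "b" :
		       slots[i].tail == &a ? "a" : "guard");
	return 0;
}

This also explains the "don't add anything in between" comment: the walk relies on the guard sitting directly after the log2_size array in memory.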
@@ -1011,6 +1015,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 		lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
 	}
 
+	for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
+		adev->mman.guard.lru[j] = NULL;
+	adev->mman.guard.swap_lru = NULL;
+
 	adev->mman.initialized = true;
 	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
 			   adev->mc.real_vram_size >> PAGE_SHIFT);
@@ -714,7 +714,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
 		goto err1;
-	} else if (r) {
+	} else if (r < 0) {
 		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 		goto err1;
 	}
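
The sdma_v2_4_ring_test_ib fix hinges on the usual return convention of the jiffies-based wait helpers: a negative value is an error, zero means the wait timed out, and a positive value is the remaining time on success. Checking "else if (r)" therefore also took the successful, positive case into the error path. A hedged user-space sketch of the intended handling (wait_for_result() is a made-up stand-in, not the kernel fence API):

#include <stdio.h>

/* made-up stand-in: <0 error, 0 timed out, >0 time left on success */
static long wait_for_result(void)
{
	return 5; /* pretend the fence signalled with 5 jiffies to spare */
}

int main(void)
{
	long r = wait_for_result();

	if (r == 0) {
		fprintf(stderr, "timed out\n");
		return 1;
	} else if (r < 0) {            /* only negative values are real errors */
		fprintf(stderr, "wait failed (%ld)\n", r);
		return 1;
	}
	printf("success, %ld jiffies remaining\n", r);
	return 0;
}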
@@ -405,7 +405,7 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
 	spin_lock(&sched->job_list_lock);
 	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
 					 struct amd_sched_job, node);
-	if (s_job)
+	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
 		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
 
 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
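
Here sched->timeout equal to MAX_SCHEDULE_TIMEOUT indicates that job timeouts are disabled, so recovery should not re-arm the delayed TDR work in that case. A small illustration of guarding an optional timeout like this (the constant and function names below are placeholders, not the scheduler's API):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* placeholder for the kernel's MAX_SCHEDULE_TIMEOUT sentinel */
#define TIMEOUT_DISABLED LONG_MAX

static void arm_timeout_watchdog(long timeout)
{
	printf("watchdog armed for %ld ticks\n", timeout);
}

static void recover_jobs(long timeout, bool have_pending_job)
{
	/* only re-arm the watchdog if a job exists and timeouts are enabled */
	if (have_pending_job && timeout != TIMEOUT_DISABLED)
		arm_timeout_watchdog(timeout);
	else
		printf("watchdog left idle\n");
}

int main(void)
{
	recover_jobs(500, true);              /* armed */
	recover_jobs(TIMEOUT_DISABLED, true); /* skipped: timeouts disabled */
	return 0;
}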
@@ -627,7 +627,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 		if (radeon_crtc->ss.refdiv) {
 			radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
 			radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
-			if (rdev->family >= CHIP_RV770)
+			if (ASIC_IS_AVIVO(rdev) &&
+			    rdev->family != CHIP_RS780 &&
+			    rdev->family != CHIP_RS880)
 				radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 		}
 	}
@@ -263,8 +263,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 
 	rdev = radeon_get_rdev(bo->bdev);
 	ridx = radeon_copy_ring_index(rdev);
-	old_start = old_mem->start << PAGE_SHIFT;
-	new_start = new_mem->start << PAGE_SHIFT;
+	old_start = (u64)old_mem->start << PAGE_SHIFT;
+	new_start = (u64)new_mem->start << PAGE_SHIFT;
 
 	switch (old_mem->mem_type) {
 	case TTM_PL_VRAM: