drm/amdgpu: stop using TTMs fault callback

Implement the fault handler ourselves using the provided TTM functions.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/392324/

commit d3ef581afa (parent 8e0310f0ff)
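
The replacement handler is assembled from fault helpers that TTM already
exports to drivers. For orientation, a sketch of the signatures being
composed here, as declared in include/drm/ttm/ttm_bo_api.h around the time
of this commit (verify against the tree you are actually on):

	/* Reservation step: lock/trylock bo->base.resv, honouring the
	 * FAULT_FLAG_* retry semantics of the fault. */
	vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
				     struct vm_fault *vmf);

	/* The fault work proper; expects the reservation to be held. */
	vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
					    pgprot_t prot,
					    pgoff_t num_prefault,
					    pgoff_t fault_page_size);

	/* Stock vm_operations_struct callbacks, reusable as-is. */
	void ttm_bo_vm_open(struct vm_area_struct *vma);
	void ttm_bo_vm_close(struct vm_area_struct *vma);
	int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
			     void *buf, int len, int write);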
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1341,19 +1341,14 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
  * Returns:
  * 0 for success or a negative error code on failure.
  */
-int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct ttm_operation_ctx ctx = { false, false };
-	struct amdgpu_bo *abo;
+	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
 	unsigned long offset, size;
 	int r;
 
-	if (!amdgpu_bo_is_amdgpu_bo(bo))
-		return 0;
-
-	abo = ttm_to_amdgpu_bo(bo);
-
 	/* Remember that this BO was accessed by the CPU */
 	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 
@@ -1367,7 +1362,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 
 	/* Can't move a pinned BO to visible VRAM */
 	if (abo->tbo.pin_count > 0)
-		return -EINVAL;
+		return VM_FAULT_SIGBUS;
 
 	/* hurrah the memory is not visible ! */
 	atomic64_inc(&adev->num_vram_cpu_page_faults);
@@ -1379,15 +1374,18 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	abo->placement.busy_placement = &abo->placements[1];
 
 	r = ttm_bo_validate(bo, &abo->placement, &ctx);
-	if (unlikely(r != 0))
-		return r;
+	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
+		return VM_FAULT_NOPAGE;
+	else if (unlikely(r))
+		return VM_FAULT_SIGBUS;
 
 	offset = bo->mem.start << PAGE_SHIFT;
 	/* this should never happen */
 	if (bo->mem.mem_type == TTM_PL_VRAM &&
 	    (offset + size) > adev->gmc.visible_vram_size)
-		return -EINVAL;
+		return VM_FAULT_SIGBUS;
 
+	ttm_bo_move_to_lru_tail_unlocked(bo);
 	return 0;
 }
 
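Because the function is now called from a vm_ops.fault handler, it must
return vm_fault_t codes instead of negative errnos; the hunks above
open-code that translation at each exit point. A hypothetical helper
(invented here for illustration, not part of the commit) states the
mapping in one place:

	/* Illustration only -- the commit open-codes this at each return. */
	static vm_fault_t amdgpu_errno_to_fault(int err)
	{
		/* Transient contention: report no page so the faulting
		 * access simply re-executes and retries. */
		if (err == -EBUSY || err == -ERESTARTSYS)
			return VM_FAULT_NOPAGE;

		/* Anything else is unrecoverable for this mapping. */
		if (err)
			return VM_FAULT_SIGBUS;

		return 0;	/* success: fault handling continues */
	}
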
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -284,7 +284,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 			   bool evict,
 			   struct ttm_resource *new_mem);
 void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
-int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 		     bool shared);
 int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1708,7 +1708,6 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
 	.verify_access = &amdgpu_verify_access,
 	.move_notify = &amdgpu_bo_move_notify,
 	.release_notify = &amdgpu_bo_release_notify,
-	.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
 	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
 	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
 	.access_memory = &amdgpu_ttm_access_memory,
@@ -2088,15 +2087,48 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 	adev->mman.buffer_funcs_enabled = enable;
 }
 
+static vm_fault_t amdgpu_ttm_fault(struct vm_fault *vmf)
+{
+	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
+	vm_fault_t ret;
+
+	ret = ttm_bo_vm_reserve(bo, vmf);
+	if (ret)
+		return ret;
+
+	ret = amdgpu_bo_fault_reserve_notify(bo);
+	if (ret)
+		goto unlock;
+
+	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+				       TTM_BO_VM_NUM_PREFAULT, 1);
+	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+		return ret;
+
+unlock:
+	dma_resv_unlock(bo->base.resv);
+	return ret;
+}
+
+static struct vm_operations_struct amdgpu_ttm_vm_ops = {
+	.fault = amdgpu_ttm_fault,
+	.open = ttm_bo_vm_open,
+	.close = ttm_bo_vm_close,
+	.access = ttm_bo_vm_access
+};
+
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	struct drm_file *file_priv = filp->private_data;
 	struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);
+	int r;
 
-	if (adev == NULL)
-		return -EINVAL;
+	r = ttm_bo_mmap(filp, vma, &adev->mman.bdev);
+	if (unlikely(r != 0))
+		return r;
 
-	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
+	vma->vm_ops = &amdgpu_ttm_vm_ops;
+	return 0;
 }
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
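
A locking note on amdgpu_ttm_fault() above: ttm_bo_vm_reserve() returns
with bo->base.resv held on success, and when ttm_bo_vm_fault_reserved()
returns VM_FAULT_RETRY without FAULT_FLAG_RETRY_NOWAIT it has already
dropped that reservation (and the mmap lock) itself before waiting, which
is why that path returns directly instead of falling through to
dma_resv_unlock(). Stripped of the amdgpu-specific
amdgpu_bo_fault_reserve_notify() call, the handler reduces to the same
skeleton as TTM's generic ttm_bo_vm_fault(); a sketch with an invented
driver name:

	/* Sketch: the baseline the amdgpu handler extends. "mydrv" is a
	 * made-up name; this mirrors TTM's own ttm_bo_vm_fault(). */
	static vm_fault_t mydrv_ttm_fault(struct vm_fault *vmf)
	{
		struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
		vm_fault_t ret;

		ret = ttm_bo_vm_reserve(bo, vmf);	/* takes bo->base.resv */
		if (ret)
			return ret;

		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
					       TTM_BO_VM_NUM_PREFAULT, 1);
		if (ret == VM_FAULT_RETRY &&
		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
			return ret;	/* helper already dropped the locks */

		dma_resv_unlock(bo->base.resv);
		return ret;
	}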