drm/radeon: Remove tests for -ERESTART from the TTM code.

Also sets the affected TTM calls up to not wait interruptibly: since the
Radeon code does not return to user-space when a signal is received, an
interruptible wait would just spin in the kernel until the TTM call
succeeds.

Modifies the interruptible fence waits to return -ERESTARTSYS rather than
-EBUSY when interrupted by a signal, since that is the (as yet
undocumented) semantics required by the TTM sync object hooks.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Thomas Hellstrom 2009-12-07 18:36:19 +01:00 committed by Dave Airlie
parent 98ffc4158e
commit 5cc6fbab9d
2 changed files with 14 additions and 29 deletions
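
For context, the pattern this patch removes versus the one it adopts, as a minimal self-contained sketch (illustrative kernel code, not taken from radeon; the wait queue, flag and HZ timeout are placeholders):

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/errno.h>

/*
 * Old pattern: retry inside the kernel whenever the wait was interrupted.
 * Because the signal stays pending, every subsequent interruptible wait
 * returns -ERESTARTSYS immediately, so this busy-loops until @done
 * happens to become true.
 */
static int wait_and_retry_on_signal(wait_queue_head_t *wq, bool *done)
{
	long r;

retry:
	r = wait_event_interruptible_timeout(*wq, *done, HZ);
	if (r == -ERESTARTSYS)
		goto retry;	/* in-kernel spin while the signal is pending */
	return r > 0 ? 0 : -EBUSY;
}

/*
 * New pattern: hand -ERESTARTSYS back to the caller so the signal code
 * can restart the syscall, or wait uninterruptibly where a restart is
 * not an option (kernel-internal allocations).
 */
static int wait_and_propagate_signal(wait_queue_head_t *wq, bool *done)
{
	long r;

	r = wait_event_interruptible_timeout(*wq, *done, HZ);
	if (r < 0)
		return r;	/* -ERESTARTSYS */
	return r > 0 ? 0 : -EBUSY;
}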

drivers/gpu/drm/radeon/radeon_fence.c

@@ -197,9 +197,8 @@ retry:
 		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
 				radeon_fence_signaled(fence), timeout);
 		radeon_irq_kms_sw_irq_put(rdev);
-		if (unlikely(r == -ERESTARTSYS)) {
-			return -EBUSY;
-		}
+		if (unlikely(r != 0))
+			return r;
 	} else {
 		radeon_irq_kms_sw_irq_get(rdev);
 		r = wait_event_timeout(rdev->fence_drv.queue,
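
The check above relies on the return convention of wait_event_interruptible_timeout(): it returns the remaining jiffies (> 0) once the condition is true, 0 on timeout, and -ERESTARTSYS when a signal arrives. Passing that value straight up, instead of folding it into -EBUSY, is what lets the TTM sync object hooks see the interruption. A hedged sketch of that mapping (the helper below is illustrative, not the actual TTM hook):

#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/errno.h>

/* Report a signal as -ERESTARTSYS so the ioctl can be restarted;
 * only a genuine timeout is turned into -EBUSY. */
static int example_fence_wait(wait_queue_head_t *wq, atomic_t *signaled,
			      unsigned long timeout)
{
	long r;

	r = wait_event_interruptible_timeout(*wq, atomic_read(signaled),
					     timeout);
	if (r < 0)
		return r;		/* interrupted: -ERESTARTSYS */
	if (r == 0)
		return -EBUSY;		/* timed out, fence not signaled */
	return 0;			/* fence signaled */
}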

drivers/gpu/drm/radeon/radeon_object.c

@@ -121,16 +121,15 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
 	INIT_LIST_HEAD(&bo->list);
 	flags = radeon_ttm_flags_from_domain(domain);
-retry:
 	/* Kernel allocation are uninterruptible */
 	r = ttm_buffer_object_init(&rdev->mman.bdev, &bo->tbo, size, type,
-				   flags, 0, 0, true, NULL, size,
+				   flags, 0, 0, !kernel, NULL, size,
 				   &radeon_ttm_bo_destroy);
 	if (unlikely(r != 0)) {
-		if (r == -ERESTART)
-			goto retry;
 		/* ttm call radeon_ttm_object_object_destroy if error happen */
-		dev_err(rdev->dev, "object_init failed for (%ld, 0x%08X)\n",
-			size, flags);
+		if (r != -ERESTARTSYS)
+			dev_err(rdev->dev,
+				"object_init failed for (%ld, 0x%08X)\n",
+				size, flags);
 		return r;
 	}
 	*bo_ptr = bo;
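
The two decisions in the hunk above, restated as a sketch (the wrapper and its do_init callback are made-up names standing in for ttm_buffer_object_init(); this is not code from the patch): the wait is interruptible only for allocations made on behalf of user space, and an interrupted allocation is passed up silently rather than logged as a driver error.

#include <linux/device.h>
#include <linux/errno.h>

static int example_bo_create(struct device *dev, bool kernel,
			     int (*do_init)(bool interruptible))
{
	/* kernel-internal allocations must not be aborted by a signal */
	int r = do_init(!kernel);

	if (unlikely(r != 0)) {
		/* an interrupted user allocation is expected, not an error */
		if (r != -ERESTARTSYS)
			dev_err(dev, "buffer object init failed (%d)\n", r);
		return r;
	}
	return 0;
}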
@@ -200,18 +199,14 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
 	radeon_ttm_placement_from_domain(bo, domain);
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-retry:
-	r = ttm_buffer_object_validate(&bo->tbo, &bo->placement, true, false);
+	r = ttm_buffer_object_validate(&bo->tbo, &bo->placement, false, false);
 	if (likely(r == 0)) {
 		bo->pin_count = 1;
 		if (gpu_addr != NULL)
 			*gpu_addr = radeon_bo_gpu_offset(bo);
 	}
-	if (unlikely(r != 0)) {
-		if (r == -ERESTART)
-			goto retry;
+	if (unlikely(r != 0))
 		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
-	}
 	return r;
 }
@@ -228,15 +223,10 @@ int radeon_bo_unpin(struct radeon_bo *bo)
 		return 0;
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-retry:
-	r = ttm_buffer_object_validate(&bo->tbo, &bo->placement, true, false);
-	if (unlikely(r != 0)) {
-		if (r == -ERESTART)
-			goto retry;
+	r = ttm_buffer_object_validate(&bo->tbo, &bo->placement, false, false);
+	if (unlikely(r != 0))
 		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
-		return r;
-	}
-	return 0;
+	return r;
 }
 
 int radeon_bo_evict_vram(struct radeon_device *rdev)
@@ -346,15 +336,11 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
 				radeon_ttm_placement_from_domain(bo,
 								lobj->rdomain);
 			}
-retry:
 			r = ttm_buffer_object_validate(&bo->tbo,
 						&bo->placement,
-						true, false);
-			if (unlikely(r)) {
-				if (r == -ERESTART)
-					goto retry;
+						false, false);
+			if (unlikely(r))
 				return r;
-			}
 		}
 		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
 		lobj->tiling_flags = bo->tiling_flags;
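
What -ERESTARTSYS buys on the user-space side: the kernel's signal handling either restarts the interrupted ioctl transparently or surfaces it as EINTR, which callers retry with the familiar libdrm-style loop sketched below (illustrative user-space code, not part of this patch):

#include <errno.h>
#include <sys/ioctl.h>

/* Retry an ioctl interrupted by a signal, in the style of libdrm's
 * drmIoctl(); the kernel never has to spin on the caller's behalf. */
static int retry_ioctl(int fd, unsigned long request, void *arg)
{
	int ret;

	do {
		ret = ioctl(fd, request, arg);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));

	return ret;
}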