drm/radeon/kms: sync across multiple rings when doing bo moves v3
We need to synchronize across rings when doing a bo move to make sure
the buffer is idle if it's in use by a different ring than the ring
doing the move.

v2: fix fence setup for bo moves
v3: add missing ring lock/unlock

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 93504fce28
commit 3000bf3933
@@ -1489,6 +1489,8 @@ struct radeon_device {
 	unsigned		debugfs_count;
 	/* virtual memory */
 	struct radeon_vm_manager	vm_manager;
+	/* ring used for bo copies */
+	u32				copy_ring;
 };
 
 int radeon_device_init(struct radeon_device *rdev,
@@ -1036,6 +1036,9 @@ int radeon_asic_init(struct radeon_device *rdev)
 	else
 		rdev->num_crtc = 2;
 
+	/* set the ring used for bo copies */
+	rdev->copy_ring = RADEON_RING_TYPE_GFX_INDEX;
+
 	switch (rdev->family) {
 	case CHIP_R100:
 	case CHIP_RV100:
@@ -223,10 +223,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 	struct radeon_device *rdev;
 	uint64_t old_start, new_start;
 	struct radeon_fence *fence;
-	int r;
+	int r, i;
 
 	rdev = radeon_get_rdev(bo->bdev);
-	r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
+	r = radeon_fence_create(rdev, &fence, rdev->copy_ring);
 	if (unlikely(r)) {
 		return r;
 	}
@@ -255,13 +255,43 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
 		return -EINVAL;
 	}
-	if (!rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready) {
-		DRM_ERROR("Trying to move memory with CP turned off.\n");
+	if (!rdev->ring[rdev->copy_ring].ready) {
+		DRM_ERROR("Trying to move memory with ring turned off.\n");
 		return -EINVAL;
 	}
 
 	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
 
+	/* sync other rings */
+	if (rdev->family >= CHIP_R600) {
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			/* no need to sync to our own or unused rings */
+			if (i == rdev->copy_ring || !rdev->ring[i].ready)
+				continue;
+
+			if (!fence->semaphore) {
+				r = radeon_semaphore_create(rdev, &fence->semaphore);
+				/* FIXME: handle semaphore error */
+				if (r)
+					continue;
+			}
+
+			r = radeon_ring_lock(rdev, &rdev->ring[i], 3);
+			/* FIXME: handle ring lock error */
+			if (r)
+				continue;
+			radeon_semaphore_emit_signal(rdev, i, fence->semaphore);
+			radeon_ring_unlock_commit(rdev, &rdev->ring[i]);
+
+			r = radeon_ring_lock(rdev, &rdev->ring[rdev->copy_ring], 3);
+			/* FIXME: handle ring lock error */
+			if (r)
+				continue;
+			radeon_semaphore_emit_wait(rdev, rdev->copy_ring, fence->semaphore);
+			radeon_ring_unlock_commit(rdev, &rdev->ring[rdev->copy_ring]);
+		}
+	}
+
 	r = radeon_copy(rdev, old_start, new_start,
 			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
 			fence);
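For reference, the core of the change is the per-ring semaphore handshake
radeon_move_blit() now performs before submitting the copy: every other
ready ring emits a semaphore signal, and the copy ring emits a matching
wait, so the blit cannot start while the buffer is still in flight on
another ring. Below is a condensed, hypothetical sketch of that loop; the
helper name is invented, while the radeon_* calls, ring indices and the
copy_ring field are the ones used in the diff above.

/*
 * Hypothetical helper (not part of the patch) that condenses the
 * handshake radeon_move_blit() performs: every other ready ring signals
 * the semaphore once its queued work has drained, and the copy ring
 * waits on that signal before any copy commands are submitted.
 */
static void radeon_sync_copy_ring(struct radeon_device *rdev,
				  struct radeon_semaphore *sem)
{
	int i, r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		/* skip the copy ring itself and rings that are not running */
		if (i == rdev->copy_ring || !rdev->ring[i].ready)
			continue;

		/* ring i: emit a signal after its already-queued work */
		r = radeon_ring_lock(rdev, &rdev->ring[i], 3);
		if (r)
			continue;	/* same FIXME as the patch: error is ignored */
		radeon_semaphore_emit_signal(rdev, i, sem);
		radeon_ring_unlock_commit(rdev, &rdev->ring[i]);

		/* copy ring: block until that signal arrives */
		r = radeon_ring_lock(rdev, &rdev->ring[rdev->copy_ring], 3);
		if (r)
			continue;
		radeon_semaphore_emit_wait(rdev, rdev->copy_ring, sem);
		radeon_ring_unlock_commit(rdev, &rdev->ring[rdev->copy_ring]);
	}
}

The patch only enters this loop on CHIP_R600 and later, presumably because
older ASICs drive a single ring and lack the semaphore packets the
handshake relies on, so there is nothing to synchronize against.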