drm/amdgpu: fix user fence write race condition

The buffer object backing the user fence is reserved using the non-user
fence, i.e., as soon as the non-user fence is signaled, the user fence
buffer object can be moved or even destroyed.

Therefore, emit the user fence first.

Both fences have the same cache invalidation behavior, so this should
have no user-visible effect.

Signed-off-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 9fc15f5fc8 (parent 860c15e903)
Author:    Nicolai Hähnle <nicolai.haehnle@amd.com>
Date:      2018-06-29 13:23:25 +02:00
Committer: Alex Deucher <alexander.deucher@amd.com>

@@ -231,6 +231,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
 		fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;
 
+	/* wrap the last IB with fence */
+	if (job && job->uf_addr) {
+		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
+				       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
+	}
+
 	r = amdgpu_fence_emit(ring, f, fence_flags);
 	if (r) {
 		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
@@ -243,12 +249,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	if (ring->funcs->insert_end)
 		ring->funcs->insert_end(ring);
 
-	/* wrap the last IB with fence */
-	if (job && job->uf_addr) {
-		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
-				       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
-	}
-
 	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
 		amdgpu_ring_patch_cond_exec(ring, patch_offset);
 
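
For illustration only, the stand-alone sketch below models the race outside the kernel: a "ring" executes queued operations strictly in submission order, and the buffer object holding the user fence value is only valid until the driver (non-user) fence runs. All names here (user_fence_bo, emit_user_fence, emit_driver_fence, run_ring) are hypothetical and the model is a heavy simplification of amdgpu_ib_schedule(), not driver code; it only shows why the user fence write has to be queued before the fence that ends the buffer's reservation.

/* Minimal user-space model of the ordering fixed by this commit.
 * Hypothetical names throughout; this is not amdgpu code. */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>

struct user_fence_bo {
	uint64_t seq;    /* where the user fence sequence number is written */
	bool     freed;  /* set once the reservation (non-user fence) ends  */
};

/* A ring operation: executed in exactly the order it was emitted. */
typedef void (*ring_op)(struct user_fence_bo *bo, uint64_t seq);

static void emit_user_fence(struct user_fence_bo *bo, uint64_t seq)
{
	if (bo->freed) {
		/* This is the race: writing to a BO that may already have
		 * been moved or destroyed once the non-user fence signaled. */
		fprintf(stderr, "BUG: user fence write after BO release\n");
		exit(EXIT_FAILURE);
	}
	bo->seq = seq;
}

static void emit_driver_fence(struct user_fence_bo *bo, uint64_t seq)
{
	(void)seq;
	/* Signaling the non-user fence ends the reservation, so the user
	 * fence BO may go away at any point after this. */
	bo->freed = true;
}

static void run_ring(const ring_op *ops, size_t n, struct user_fence_bo *bo)
{
	for (size_t i = 0; i < n; i++)
		ops[i](bo, 42);
}

int main(void)
{
	struct user_fence_bo bo = { 0 };

	/* Order after the patch: user fence first, then the fence that
	 * releases the reservation.  Swapping the two entries reproduces
	 * the race the patch removes. */
	const ring_op ops[] = { emit_user_fence, emit_driver_fence };

	run_ring(ops, sizeof(ops) / sizeof(ops[0]), &bo);
	printf("user fence sequence written: %llu\n",
	       (unsigned long long)bo.seq);
	return 0;
}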