
drm/radeon: fix VM flush on cayman/aruba (v3)

We need to wait for the GPUVM flush to complete.  There
was some confusion as to how this mechanism was supposed
to work.  The operation is not atomic.  For GPU initiated
invalidations you need to read back a VM register to
introduce enough latency for the update to complete.

v2: drop gart changes
v3: just read back rather than polling

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Cc: stable@vger.kernel.org
Alex Deucher, 2015-01-05 19:42:25 -05:00
parent 79305ec6e6
commit cbfc35b90f
3 changed files with 40 additions and 0 deletions

drivers/gpu/drm/radeon/ni.c

@@ -2516,6 +2516,16 @@ void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
 	radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
 	radeon_ring_write(ring, 1 << vm_id);
+	/* wait for the invalidate to complete */
+	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+	radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
+				 WAIT_REG_MEM_ENGINE(0))); /* me */
+	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0); /* ref */
+	radeon_ring_write(ring, 0); /* mask */
+	radeon_ring_write(ring, 0x20); /* poll interval */
 	/* sync PFP to ME, otherwise we might get invalid PFP reads */
 	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
 	radeon_ring_write(ring, 0x0);
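
Note on the hunk above: WAIT_REG_MEM_FUNCTION(0) selects the "always" compare, so the packet never blocks on a condition; its useful side effect is that the ME reads VM_INVALIDATE_REQUEST back, which is the read-back the commit message describes. A minimal annotated sketch of the same sequence follows; the helper name is hypothetical and the per-dword comments are best-effort readings of the packet layout, not taken from the commit:

/* Sketch only: re-emits the WAIT_REG_MEM read-back added above, with each
 * dword annotated.  Helper name is made up; dword meanings are best-effort. */
static void cayman_emit_vm_invalidate_readback(struct radeon_ring *ring)
{
	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |	/* compare function: always true */
				 WAIT_REG_MEM_ENGINE(0)));	/* wait on the ME, not the PFP */
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);	/* register offset, in dwords */
	radeon_ring_write(ring, 0);				/* upper bits, unused for register space */
	radeon_ring_write(ring, 0);				/* reference value (ignored: "always") */
	radeon_ring_write(ring, 0);				/* compare mask (ignored: "always") */
	radeon_ring_write(ring, 0x20);				/* poll interval */
}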

drivers/gpu/drm/radeon/ni_dma.c

@@ -463,5 +463,11 @@ void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
 	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
 	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
 	radeon_ring_write(ring, 1 << vm_id);
+	/* wait for invalidate to complete */
+	radeon_ring_write(ring, DMA_SRBM_READ_PACKET);
+	radeon_ring_write(ring, (0xff << 20) | (VM_INVALIDATE_REQUEST >> 2));
+	radeon_ring_write(ring, 0); /* mask */
+	radeon_ring_write(ring, 0); /* value */
 }
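
Note on the hunk above: this is the DMA-ring counterpart of the WAIT_REG_MEM read-back in ni.c. With both the mask and value dwords set to 0, the compare cannot fail, so DMA_SRBM_READ_PACKET effectively performs a single read of VM_INVALIDATE_REQUEST rather than a real poll. A small annotated sketch follows; the helper name is hypothetical, and treating the 0xff field as a retry/count value is an assumption, not something stated in the commit:

/* Sketch only: the SRBM read-back added above, annotated.  Helper name is
 * made up; the 0xff field is assumed (not confirmed) to be a retry count. */
static void cayman_dma_emit_vm_invalidate_readback(struct radeon_ring *ring)
{
	radeon_ring_write(ring, DMA_SRBM_READ_PACKET);
	radeon_ring_write(ring, (0xff << 20) | (VM_INVALIDATE_REQUEST >> 2)); /* SRBM register dword offset */
	radeon_ring_write(ring, 0); /* mask: 0, so the compare trivially succeeds */
	radeon_ring_write(ring, 0); /* value */
}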

drivers/gpu/drm/radeon/nid.h

@@ -1133,6 +1133,23 @@
 #define PACKET3_MEM_SEMAPHORE 0x39
 #define PACKET3_MPEG_INDEX 0x3A
 #define PACKET3_WAIT_REG_MEM 0x3C
+#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
+	/* 0 - always
+	 * 1 - <
+	 * 2 - <=
+	 * 3 - ==
+	 * 4 - !=
+	 * 5 - >=
+	 * 6 - >
+	 */
+#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
+	/* 0 - reg
+	 * 1 - mem
+	 */
+#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
+	/* 0 - me
+	 * 1 - pfp
+	 */
 #define PACKET3_MEM_WRITE 0x3D
 #define PACKET3_PFP_SYNC_ME 0x42
 #define PACKET3_SURFACE_SYNC 0x43
@@ -1272,6 +1289,13 @@
 	(1 << 21) | \
 	(((n) & 0xFFFFF) << 0))
+#define DMA_SRBM_POLL_PACKET ((9 << 28) | \
+			      (1 << 27) | \
+			      (1 << 26))
+#define DMA_SRBM_READ_PACKET ((9 << 28) | \
+			      (1 << 27))
 /* async DMA Packet types */
 #define DMA_PACKET_WRITE 0x2
 #define DMA_PACKET_COPY 0x3
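
Note on the defines above: DMA_SRBM_READ_PACKET and DMA_SRBM_POLL_PACKET share opcode 9 in bits 31:28 plus bit 27; the poll variant additionally sets bit 26. A standalone userspace sketch (not from the commit) that just prints the resulting header values:

#include <stdio.h>

/* Same expressions as the nid.h additions above, repeated here so this
 * snippet compiles on its own. */
#define DMA_SRBM_POLL_PACKET ((9u << 28) | (1u << 27) | (1u << 26))
#define DMA_SRBM_READ_PACKET ((9u << 28) | (1u << 27))

int main(void)
{
	/* READ: 0x98000000, POLL: 0x9c000000 -- they differ only in bit 26 */
	printf("DMA_SRBM_READ_PACKET = 0x%08x\n", DMA_SRBM_READ_PACKET);
	printf("DMA_SRBM_POLL_PACKET = 0x%08x\n", DMA_SRBM_POLL_PACKET);
	return 0;
}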