drm/etnaviv: remove switch_context member from etnaviv_gpu
There is no need to store this in the gpu struct. MMU flushes are triggered
correctly in reaction to MMU maps and unmaps, independent of the current ctx.
Any required pipe switches can be inferred from the current and the desired
GPU exec state.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
Reviewed-by: Christian Gmeiner <christian.gmeiner@gmail.com>
commit 4375ffffbf
parent fa67ac84a3
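The heart of the change, restated: instead of latching a switch_context flag on
struct etnaviv_gpu at submit time, etnaviv_buffer_queue() now derives the
decision on the spot by comparing the GPU's cached exec state with the one
requested by the command buffer. Below is a minimal standalone sketch of that
inference; the helper name needs_pipe_switch and its free-standing form are
illustrative only, not actual driver code.

/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * gpu->exec_state caches the pipe of the last queued command buffer and is
 * reset to -1 on hardware (re)init, so a plain comparison against the exec
 * state requested by the incoming cmdbuf is enough to tell whether a pipe
 * switch command sequence has to be emitted.
 */
static bool needs_pipe_switch(int gpu_exec_state, int cmdbuf_exec_state)
{
        /* -1 after reset never matches, forcing a switch on the first submit */
        return gpu_exec_state != cmdbuf_exec_state;
}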
drivers/gpu/drm/etnaviv/etnaviv_buffer.c

@@ -294,6 +294,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
         unsigned int waitlink_offset = buffer->user_size - 16;
         u32 return_target, return_dwords;
         u32 link_target, link_dwords;
+        bool switch_context = gpu->exec_state != cmdbuf->exec_state;
 
         if (drm_debug & DRM_UT_DRIVER)
                 etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
@@ -306,7 +307,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
          * need to append a mmu flush load state, followed by a new
          * link to this buffer - a total of four additional words.
          */
-        if (gpu->mmu->need_flush || gpu->switch_context) {
+        if (gpu->mmu->need_flush || switch_context) {
                 u32 target, extra_dwords;
 
                 /* link command */
@@ -321,7 +322,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
                 }
 
                 /* pipe switch commands */
-                if (gpu->switch_context)
+                if (switch_context)
                         extra_dwords += 4;
 
                 target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
@@ -349,10 +350,9 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
                         gpu->mmu->need_flush = false;
                 }
 
-                if (gpu->switch_context) {
+                if (switch_context) {
                         etnaviv_cmd_select_pipe(gpu, buffer, cmdbuf->exec_state);
                         gpu->exec_state = cmdbuf->exec_state;
-                        gpu->switch_context = false;
                 }
 
                 /* And the link to the submitted buffer */
@@ -421,4 +421,6 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
 
         if (drm_debug & DRM_UT_DRIVER)
                 etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
+
+        gpu->lastctx = cmdbuf->ctx;
 }
drivers/gpu/drm/etnaviv/etnaviv_gpu.c

@@ -1416,12 +1416,6 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
         submit->fence = dma_fence_get(fence);
         gpu->active_fence = submit->fence->seqno;
 
-        if (gpu->lastctx != cmdbuf->ctx) {
-                gpu->mmu->need_flush = true;
-                gpu->switch_context = true;
-                gpu->lastctx = cmdbuf->ctx;
-        }
-
         if (cmdbuf->nr_pmrs) {
                 gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
                 gpu->event[event[1]].cmdbuf = cmdbuf;
@@ -1662,7 +1656,7 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
         etnaviv_gpu_update_clock(gpu);
         etnaviv_gpu_hw_init(gpu);
 
-        gpu->switch_context = true;
+        gpu->lastctx = NULL;
         gpu->exec_state = -1;
 
         mutex_unlock(&gpu->lock);
drivers/gpu/drm/etnaviv/etnaviv_gpu.h

@@ -106,7 +106,6 @@ struct etnaviv_gpu {
         struct mutex lock;
         struct etnaviv_chip_identity identity;
         struct etnaviv_file_private *lastctx;
-        bool switch_context;
 
         /* 'ring'-buffer: */
         struct etnaviv_cmdbuf *buffer;