unsigned int waitlink_offset = buffer->user_size - 16;
u32 return_target, return_dwords;
u32 link_target, link_dwords;
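+ /*
+  * A pipe/context switch is needed whenever the execution state
+  * requested by this command buffer differs from the state currently
+  * programmed on the GPU.
+  */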
+ bool switch_context = gpu->exec_state != cmdbuf->exec_state;
if (drm_debug & DRM_UT_DRIVER)
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
* need to append a mmu flush load state, followed by a new
* link to this buffer - a total of four additional words.
*/
- if (gpu->mmu->need_flush || gpu->switch_context) {
+ if (gpu->mmu->need_flush || switch_context) {
u32 target, extra_dwords;
/* link command */
}
/* pipe switch commands */
- if (gpu->switch_context)
+ if (switch_context)
extra_dwords += 4;
target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
gpu->mmu->need_flush = false;
}
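+ /*
+  * gpu->exec_state is updated once the new pipe has been selected, so
+  * the switch_context comparison evaluates false for further buffers
+  * queued with the same execution state.
+  */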
- if (gpu->switch_context) {
+ if (switch_context) {
etnaviv_cmd_select_pipe(gpu, buffer, cmdbuf->exec_state);
gpu->exec_state = cmdbuf->exec_state;
- gpu->switch_context = false;
}
/* And the link to the submitted buffer */
if (drm_debug & DRM_UT_DRIVER)
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
+
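+ /* Remember the context whose command buffer has just been queued. */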
+ gpu->lastctx = cmdbuf->ctx;
}
submit->fence = dma_fence_get(fence);
gpu->active_fence = submit->fence->seqno;
- if (gpu->lastctx != cmdbuf->ctx) {
- gpu->mmu->need_flush = true;
- gpu->switch_context = true;
- gpu->lastctx = cmdbuf->ctx;
- }
-
if (cmdbuf->nr_pmrs) {
gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
gpu->event[event[1]].cmdbuf = cmdbuf;
etnaviv_gpu_update_clock(gpu);
etnaviv_gpu_hw_init(gpu);
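+ /*
+  * Forcing exec_state to -1 guarantees that the next queued command
+  * buffer performs a full pipe switch; dropping lastctx presumably
+  * keeps the next submit from being treated as a continuation of the
+  * previously active context.
+  */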
- gpu->switch_context = true;
+ gpu->lastctx = NULL;
gpu->exec_state = -1;
mutex_unlock(&gpu->lock);