list_del(&submit->node);
etnaviv_submit_put(submit);
- /*
- * We need to balance the runtime PM count caused by
- * each submission. Upon submission, we increment
- * the runtime PM counter, and allocate one event.
- * So here, we put the runtime PM count for each
- * completed event.
- */
- pm_runtime_put_autosuspend(gpu->dev);
}
gpu->retired_fence = fence;
@@ ... @@
return -ETIMEDOUT;
}
-int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu)
-{
- return pm_runtime_get_sync(gpu->dev);
-}
-
-void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
-{
- pm_runtime_mark_last_busy(gpu->dev);
- pm_runtime_put_autosuspend(gpu->dev);
-}
-
static void sync_point_perfmon_sample(struct etnaviv_gpu *gpu,
struct etnaviv_event *event, unsigned int flags)
{
@@ ... @@ etnaviv_gpu_submit(...)
unsigned int i, nr_events = 1, event[3];
int ret;
- ret = etnaviv_gpu_pm_get_sync(gpu);
+ ret = pm_runtime_get_sync(gpu->dev);
if (ret < 0)
return ret;
+ submit->runtime_resumed = true;
/*
* if there are performance monitor requests we need to have
@@ ... @@
ret = event_alloc(gpu, nr_events, event);
if (ret) {
DRM_ERROR("no free events\n");
- goto out_pm_put;
+ return ret;
}
mutex_lock(&gpu->lock);
list_add_tail(&submit->node, &gpu->active_submit_list);
- /* We're committed to adding this command buffer, hold a PM reference */
- pm_runtime_get_noresume(gpu->dev);
-
hangcheck_timer_reset(gpu);
ret = 0;
out_unlock:
mutex_unlock(&gpu->lock);
-out_pm_put:
- etnaviv_gpu_pm_put(gpu);
-
return ret;
}
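
For readers unfamiliar with the runtime PM flow being changed here, the pattern the patch moves towards can be sketched roughly as below. This is an illustrative fragment only, not driver code: struct example_submit and the two helpers are invented names standing in for struct etnaviv_gem_submit and its submit/free paths, and it assumes the balancing pm_runtime_put_autosuspend() now lives wherever the submit object is finally released, which is outside the hunks quoted above. Only the pm_runtime_*() calls are real kernel APIs.

#include <linux/pm_runtime.h>

/* Illustrative stand-in for struct etnaviv_gem_submit. */
struct example_submit {
	struct device *dev;	/* the GPU device, i.e. gpu->dev */
	bool runtime_resumed;	/* true once this submit holds a PM reference */
};

/* Take one runtime PM reference per submit, the first time it is queued. */
static int example_submit_start(struct example_submit *submit)
{
	int ret;

	if (submit->runtime_resumed)
		return 0;

	ret = pm_runtime_get_sync(submit->dev);
	if (ret < 0)
		return ret;

	submit->runtime_resumed = true;

	return 0;
}

/* Drop that single reference when the submit object is released. */
static void example_submit_free(struct example_submit *submit)
{
	if (submit->runtime_resumed) {
		pm_runtime_mark_last_busy(submit->dev);
		pm_runtime_put_autosuspend(submit->dev);
	}
}

Tying the PM reference to the lifetime of the submit is what lets the error path in etnaviv_gpu_submit() replace the goto out_pm_put unwind with a plain return ret, and lets the retire path drop its per-event pm_runtime_put_autosuspend() call.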