drm/amdgpu: rework TDR in scheduler (v2)
author Monk Liu <Monk.Liu@amd.com>
Fri, 4 Mar 2016 10:51:02 +0000 (18:51 +0800)
committer Alex Deucher <alexander.deucher@amd.com>
Mon, 2 May 2016 19:19:57 +0000 (15:19 -0400)
Add two callbacks to the scheduler for maintaining jobs; they are
invoked for the job timeout calculation. TDR now measures the time
gap starting from when a job is actually processed by the hardware
(a condensed sketch of the scheme follows the tags below).

v2:
fix typo

Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
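
In outline: the job at the head of the scheduler's ring_mirror_list owns
a delayed work item (work_tdr) that is armed when the job reaches the
hardware and, on completion, cancelled and re-armed for its successor,
so the timer only ever measures hardware execution time. Below is a
condensed, hypothetical model of that scheme (toy_* names invented for
illustration; locking and the patch's lazier INIT_DELAYED_WORK placement
are simplified):

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/workqueue.h>

struct toy_job {
	struct list_head	node;
	struct delayed_work	work_tdr;
};

static LIST_HEAD(toy_mirror_list);
static unsigned long toy_timeout = 2 * HZ;	/* assumed value */

static void toy_timeout_fn(struct work_struct *work)
{
	struct toy_job *job = container_of(work, struct toy_job,
					   work_tdr.work);

	pr_err("toy: job %p timed out on the hardware\n", job);
}

/* Counterpart of amd_sched_job_begin(): arm the timer only when
 * this job is next on the hardware. */
static void toy_begin_job(struct toy_job *job)
{
	INIT_DELAYED_WORK(&job->work_tdr, toy_timeout_fn);
	list_add_tail(&job->node, &toy_mirror_list);
	if (list_first_entry(&toy_mirror_list,
			     struct toy_job, node) == job)
		schedule_delayed_work(&job->work_tdr, toy_timeout);
}

/* Counterpart of amd_sched_job_finish(): stop the clock for the
 * finished job and restart it for the new head of the list. */
static void toy_finish_job(struct toy_job *job)
{
	struct toy_job *next;

	list_del(&job->node);
	cancel_delayed_work(&job->work_tdr);

	next = list_first_entry_or_null(&toy_mirror_list,
					struct toy_job, node);
	if (next)
		schedule_delayed_work(&next->work_tdr, toy_timeout);
}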
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
drivers/gpu/drm/amd/scheduler/sched_fence.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 9bf72b24495c39850d60110f4810693880c3a9b5..ccb28468ece8a82e778b15fe382f31a81bc265d1 100644
@@ -754,6 +754,7 @@ void amdgpu_job_free(struct amdgpu_job *job);
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
                      struct amd_sched_entity *entity, void *owner,
                      struct fence **f);
+void amdgpu_job_timeout_func(struct work_struct *work);
 
 struct amdgpu_ring {
        struct amdgpu_device            *adev;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 23266b454aecb91ae47122f025207f4ae8bfd243..9025671d21c3465fadefbca8920dbccb458c69d6 100644
@@ -871,6 +871,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
        r = amd_sched_job_init(&job->base, &ring->sched,
                                                &p->ctx->rings[ring->idx].entity,
+                                               amdgpu_job_timeout_func,
                                                p->filp, &fence);
        if (r) {
                amdgpu_job_free(job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 23468088a9958e83265e348c790e8a175cfa203b..961cae4a1955e44d7c02f0beb22b52a957ac3448 100644
@@ -34,6 +34,15 @@ static void amdgpu_job_free_handler(struct work_struct *ws)
        kfree(job);
 }
 
+void amdgpu_job_timeout_func(struct work_struct *work)
+{
+       struct amdgpu_job *job = container_of(work, struct amdgpu_job, base.work_tdr.work);
+       DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
+                               job->base.sched->name,
+                               (uint32_t)atomic_read(&job->ring->fence_drv.last_seq),
+                               job->ring->fence_drv.sync_seq);
+}
+
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
                     struct amdgpu_job **job)
 {
@@ -103,7 +112,10 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
        if (!f)
                return -EINVAL;
 
-       r = amd_sched_job_init(&job->base, &ring->sched, entity, owner, &fence);
+       r = amd_sched_job_init(&job->base, &ring->sched,
+                                                       entity, owner,
+                                                       amdgpu_job_timeout_func,
+                                                       &fence);
        if (r)
                return r;
 
@@ -180,4 +192,6 @@ err:
 struct amd_sched_backend_ops amdgpu_sched_ops = {
        .dependency = amdgpu_job_dependency,
        .run_job = amdgpu_job_run,
+       .begin_job = amd_sched_job_begin,
+       .finish_job = amd_sched_job_finish,
 };
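
The two new hooks are routed through amd_sched_backend_ops even though
amdgpu simply forwards them to the scheduler's generic helpers; the
indirection leaves room for a backend to wrap its own bookkeeping around
them. A hypothetical override (mydrv_* names invented, not part of this
patch) could look like:

static void mydrv_begin_job(struct amd_sched_job *s_job)
{
	/* driver-private accounting could go here */
	amd_sched_job_begin(s_job);
}

static void mydrv_finish_job(struct amd_sched_job *s_job)
{
	amd_sched_job_finish(s_job);
	/* e.g. update per-ring hang statistics */
}

static struct amd_sched_backend_ops mydrv_sched_ops = {
	.dependency = amdgpu_job_dependency,
	.run_job    = amdgpu_job_run,
	.begin_job  = mydrv_begin_job,
	.finish_job = mydrv_finish_job,
};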
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 9a9fffdc272b0ab6056c4017803aebedca4f4de6..b7e8071448c67860176f97ddafb2198351b2e5f7 100644
@@ -324,6 +324,40 @@ static void amd_sched_free_job(struct fence *f, struct fence_cb *cb) {
        schedule_work(&job->work_free_job);
 }
 
+/* job_finish is called after the hw fence has signaled, and by then
+ * the job has already been removed from ring_mirror_list.
+ */
+void amd_sched_job_finish(struct amd_sched_job *s_job)
+{
+       struct amd_sched_job *next;
+       struct amd_gpu_scheduler *sched = s_job->sched;
+
+       if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
+               cancel_delayed_work(&s_job->work_tdr); /* TODO: handle the case where TDR is already running */
+
+               /* queue TDR for next job */
+               next = list_first_entry_or_null(&sched->ring_mirror_list,
+                                               struct amd_sched_job, node);
+
+               if (next) {
+                       INIT_DELAYED_WORK(&next->work_tdr, s_job->timeout_callback);
+                       schedule_delayed_work(&next->work_tdr, sched->timeout);
+               }
+       }
+}
+
+void amd_sched_job_begin(struct amd_sched_job *s_job)
+{
+       struct amd_gpu_scheduler *sched = s_job->sched;
+
+       if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+           list_first_entry_or_null(&sched->ring_mirror_list,
+                                    struct amd_sched_job, node) == s_job) {
+               INIT_DELAYED_WORK(&s_job->work_tdr, s_job->timeout_callback);
+               schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+       }
+}
+
 /**
  * Submit a job to the job queue
  *
@@ -347,6 +381,7 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 int amd_sched_job_init(struct amd_sched_job *job,
                                                struct amd_gpu_scheduler *sched,
                                                struct amd_sched_entity *entity,
+                                               void (*timeout_cb)(struct work_struct *work),
                                                void *owner, struct fence **fence)
 {
        INIT_LIST_HEAD(&job->node);
@@ -357,6 +392,7 @@ int amd_sched_job_init(struct amd_sched_job *job,
                return -ENOMEM;
 
        job->s_fence->s_job = job;
+       job->timeout_callback = timeout_cb;
 
        if (fence)
                *fence = &job->s_fence->base;
@@ -415,6 +451,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
        /* remove job from ring_mirror_list */
        spin_lock_irqsave(&sched->job_list_lock, flags);
        list_del_init(&s_fence->s_job->node);
+       sched->ops->finish_job(s_fence->s_job);
        spin_unlock_irqrestore(&sched->job_list_lock, flags);
 
        amd_sched_fence_signal(s_fence);
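
Note that finish_job is invoked here with sched->job_list_lock held,
which is why amd_sched_job_finish() above uses cancel_delayed_work():
that call returns without waiting for a timeout handler that is already
executing, which is exactly the race recorded in the TODO. The sleeping
variant would close the race but must run outside the spinlock; one
hypothetical reordering (sketch only, not part of this patch, and
re-arming the next job's timer would still need the lock):

	spin_lock_irqsave(&sched->job_list_lock, flags);
	list_del_init(&s_fence->s_job->node);
	spin_unlock_irqrestore(&sched->job_list_lock, flags);

	/* May sleep: waits for a running timeout handler to return. */
	cancel_delayed_work_sync(&s_fence->s_job->work_tdr);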
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index b26148d24a3d98a533bb8b64fb5b0db1823b9b31..a5700aded5bfd0b03057859a0e67cc2b943bc07c 100644
@@ -85,6 +85,8 @@ struct amd_sched_job {
        struct fence_cb                cb_free_job;
        struct work_struct             work_free_job;
        struct list_head                           node;
+       struct delayed_work work_tdr;
+       void (*timeout_callback) (struct work_struct *work);
 };
 
 extern const struct fence_ops amd_sched_fence_ops;
@@ -105,6 +107,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
 struct amd_sched_backend_ops {
        struct fence *(*dependency)(struct amd_sched_job *sched_job);
        struct fence *(*run_job)(struct amd_sched_job *sched_job);
+       void (*begin_job)(struct amd_sched_job *sched_job);
+       void (*finish_job)(struct amd_sched_job *sched_job);
 };
 
 enum amd_sched_priority {
@@ -150,7 +154,10 @@ void amd_sched_fence_signal(struct amd_sched_fence *fence);
 int amd_sched_job_init(struct amd_sched_job *job,
                                        struct amd_gpu_scheduler *sched,
                                        struct amd_sched_entity *entity,
+                                       void (*timeout_cb)(struct work_struct *work),
                                        void *owner, struct fence **fence);
 void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
                                                                struct amd_sched_job *s_job);
+void amd_sched_job_finish(struct amd_sched_job *s_job);
+void amd_sched_job_begin(struct amd_sched_job *s_job);
 #endif
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index 33ddd38185d518bbcf044dc35ac72b28a78031f7..2a732c4903755db7d3838f9b6c8ae00ee2804309 100644
@@ -63,6 +63,7 @@ void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
        unsigned long flags;
        spin_lock_irqsave(&sched->job_list_lock, flags);
        list_add_tail(&s_job->node, &sched->ring_mirror_list);
+       sched->ops->begin_job(s_job);
        spin_unlock_irqrestore(&sched->job_list_lock, flags);
 }
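
Taken together, the hooks give the following sequence for a scheduler
created with a finite timeout:

	amd_sched_job_init()          job->timeout_callback = timeout_cb
	amd_sched_job_pre_schedule()  job joins ring_mirror_list;
	                              ops->begin_job() arms work_tdr if the
	                              job is now at the head of the list
	amd_sched_process_job()       job leaves ring_mirror_list;
	                              ops->finish_job() cancels work_tdr and
	                              re-arms it for the next pending job
	work_tdr expires              the driver callback runs
	                              (amdgpu_job_timeout_func for amdgpu)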