drm/amdgpu: send VCE IB tests directly to the ring again
authorChristian König <christian.koenig@amd.com>
Wed, 3 Feb 2016 15:50:56 +0000 (16:50 +0100)
committerAlex Deucher <alexander.deucher@amd.com>
Wed, 10 Feb 2016 19:17:24 +0000 (14:17 -0500)
We need the IB test for GPU resets as well,
and the scheduler should be stopped then.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h

index 66e97ea0e6b04f80dfaaf949b40753240be4fd2d..97c22212d0485109307512c66d76a4aaa167dd61 100644 (file)
@@ -337,7 +337,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 
                amdgpu_vce_note_usage(adev);
 
-               r = amdgpu_vce_get_destroy_msg(ring, handle, NULL);
+               r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
                if (r)
                        DRM_ERROR("Error destroying VCE handle (%d)!\n", r);
 
@@ -411,9 +411,11 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;
 
-       r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+       r = amdgpu_ib_schedule(ring, 1, ib, AMDGPU_FENCE_OWNER_UNDEFINED, &f);
        if (r)
                goto err;
+
+       amdgpu_job_free(job);
        if (fence)
                *fence = fence_get(f);
        fence_put(f);
@@ -435,7 +437,7 @@ err:
  * Close up a stream for HW test or if userspace failed to do so
  */
 int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-                              struct fence **fence)
+                              bool direct, struct fence **fence)
 {
        const unsigned ib_size_dw = 1024;
        struct amdgpu_job *job;
@@ -468,9 +470,21 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 
        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;
-       r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_UNDEFINED, &f);
-       if (r)
-               goto err;
+
+       if (direct) {
+               r = amdgpu_ib_schedule(ring, 1, ib,
+                                      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+               if (r)
+                       goto err;
+
+               amdgpu_job_free(job);
+       } else {
+               r = amdgpu_job_submit(job, ring,
+                                     AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+               if (r)
+                       goto err;
+       }
+
        if (fence)
                *fence = fence_get(f);
        fence_put(f);
@@ -811,7 +825,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
                goto error;
        }
 
-       r = amdgpu_vce_get_destroy_msg(ring, 1, &fence);
+       r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
        if (r) {
                DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
                goto error;
index 5538cf725c85afc8765df47920262bec9abe5801..ef99d237018259bf95489977f3ff6c5fc4a7c767 100644 (file)
@@ -31,7 +31,7 @@ int amdgpu_vce_resume(struct amdgpu_device *adev);
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                              struct fence **fence);
 int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-                              struct fence **fence);
+                              bool direct, struct fence **fence);
 void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);