drm/i915: After reset on sanitization, reset the engine backends
author Chris Wilson <chris@chris-wilson.co.uk>
Thu, 31 May 2018 08:22:45 +0000 (09:22 +0100)
committer Chris Wilson <chris@chris-wilson.co.uk>
Thu, 31 May 2018 18:29:53 +0000 (19:29 +0100)
As we reset the GPU on suspend/resume, we also need to reset the
engine state tracking, so call into the engine backends. This is
especially important so that we can sanitize the state tracking
across resume.
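
The shape of the change on the sanitize path is roughly as follows (a
minimal sketch using the same i915 structures as the diff below; the
helper name is hypothetical and for illustration only). In
i915_gem_sanitize() this loop runs under struct_mutex with runtime-pm
and forcewake references held, as the hunk shows.

	/*
	 * Sketch: after forcing a GPU reset during sanitization, walk every
	 * engine and let its submission backend reset its own bookkeeping.
	 * Passing a NULL request tells the backend there is nothing to
	 * replay, so it only resynchronizes its software state (e.g. the
	 * execlists CSB head) with the freshly reset hardware.
	 */
	static void sanitize_engine_backends(struct drm_i915_private *i915)
	{
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine(engine, i915, id) {
			if (engine->reset.reset)
				engine->reset.reset(engine, NULL);
		}
	}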

References: https://bugs.freedesktop.org/show_bug.cgi?id=106702
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180531082246.9763-3-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/intel_lrc.c

index b312ac006d2420da1e643600164debd563e482f5..9b8fa1866cc9705e3da037585d5f12792fca7c21 100644 (file)
@@ -4959,7 +4959,22 @@ static void assert_kernel_context_is_current(struct drm_i915_private *i915)
 
 void i915_gem_sanitize(struct drm_i915_private *i915)
 {
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       GEM_TRACE("\n");
+
        mutex_lock(&i915->drm.struct_mutex);
+
+       intel_runtime_pm_get(i915);
+       intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+
+       /*
+        * As we have just resumed the machine and woken the device up from
+        * deep PCI sleep (presumably D3_cold), assume the HW has been reset
+        * back to defaults, recovering from whatever wedged state we left it
+        * in and so worth trying to use the device once more.
+        */
        if (i915_terminally_wedged(&i915->gpu_error))
                i915_gem_unset_wedged(i915);
 
@@ -4974,6 +4989,15 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
        if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915))
                WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
 
+       /* Reset the submission backend after resume as well as the GPU reset */
+       for_each_engine(engine, i915, id) {
+               if (engine->reset.reset)
+                       engine->reset.reset(engine, NULL);
+       }
+
+       intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+       intel_runtime_pm_put(i915);
+
        i915_gem_contexts_lost(i915);
        mutex_unlock(&i915->drm.struct_mutex);
 }
index 4bcd3206991dacf3b073ce9a6a9ad52218766c5f..38696d9cc02ec04d82dd0a9c6d081291584714ec 100644 (file)
@@ -1788,9 +1788,6 @@ static void enable_execlists(struct intel_engine_cs *engine)
        I915_WRITE(RING_HWS_PGA(engine->mmio_base),
                   engine->status_page.ggtt_offset);
        POSTING_READ(RING_HWS_PGA(engine->mmio_base));
-
-       /* Following the reset, we need to reload the CSB read/write pointers */
-       engine->execlists.csb_head = -1;
 }
 
 static bool unexpected_starting_state(struct intel_engine_cs *engine)
@@ -1962,6 +1959,9 @@ static void execlists_reset(struct intel_engine_cs *engine,
        __unwind_incomplete_requests(engine);
        spin_unlock(&engine->timeline.lock);
 
+       /* Following the reset, we need to reload the CSB read/write pointers */
+       engine->execlists.csb_head = -1;
+
        local_irq_restore(flags);
 
        /*
@@ -2461,6 +2461,8 @@ static int logical_ring_init(struct intel_engine_cs *engine)
                        upper_32_bits(ce->lrc_desc);
        }
 
+       engine->execlists.csb_head = -1;
+
        return 0;
 
 error: