drm/i915/execlists: Flush the CS events before unpinning
author Chris Wilson <chris@chris-wilson.co.uk>
Wed, 3 Oct 2018 11:09:41 +0000 (12:09 +0100)
committer Chris Wilson <chris@chris-wilson.co.uk>
Wed, 3 Oct 2018 13:27:16 +0000 (14:27 +0100)
Inside the execlists submission tasklet, we often make the mistake of
assuming that everything beneath the request is available for use.
However, the submission and the request live on two separate timelines,
and the request contents may be freed by an early retirement before we
have had a chance to run the submission tasklet (think ksoftirqd). To
safeguard ourselves against any mistakes, flush the tasklet before we
unpin the context if execlists still has a reference to this context.

v2: Pull hw_context->active tracking into schedule_in and schedule_out.
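
For illustration, a minimal sketch of the ownership-tracking pattern this
patch introduces. This is a simplified stand-in, not the i915 code: the
struct layouts are reduced and flush_events() is a hypothetical placeholder
for the process_csb() flush; the real implementation is in the intel_lrc.c
hunks below.

	struct engine;

	struct context {
		struct engine *active;	/* engine executing us, or NULL */
	};

	static void schedule_in(struct context *ce, struct engine *engine)
	{
		/* Mark the context as owned by the engine while it runs. */
		WRITE_ONCE(ce->active, engine);
	}

	static void schedule_out(struct context *ce)
	{
		/* Final CS event seen; drop the ownership marker. */
		WRITE_ONCE(ce->active, NULL);
	}

	static void context_unpin(struct context *ce)
	{
		struct engine *engine = READ_ONCE(ce->active);

		/*
		 * If the tasklet has not yet consumed the CS events for
		 * our last request, let it run before we release state
		 * it may still dereference.
		 */
		if (engine)
			flush_events(engine); /* hypothetical helper */

		/* Now safe to free state the tasklet may have referenced. */
	}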

References: 60367132a214 ("drm/i915: Avoid use-after-free of ctx in request tracepoints")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181003110941.27886-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_gem_context.h
drivers/gpu/drm/i915/intel_lrc.c

index 08165f6a0a842482cd1eec32f5bd131e39e44500..f6d870b1f73e397971e4f55714d8524f68220395 100644
@@ -163,6 +163,7 @@ struct i915_gem_context {
        /** engine: per-engine logical HW state */
        struct intel_context {
                struct i915_gem_context *gem_context;
+               struct intel_engine_cs *active;
                struct i915_vma *state;
                struct intel_ring *ring;
                u32 *lrc_reg_state;
index 28d56387edf595023778bb9cdcf886335f79a8e5..ff0e2b36cb8ba41e14aebab88b1437a67c5589aa 100644
@@ -282,6 +282,8 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
                __i915_request_unsubmit(rq);
                unwind_wa_tail(rq);
 
+               GEM_BUG_ON(rq->hw_context->active);
+
                GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
                if (rq_prio(rq) != prio) {
                        prio = rq_prio(rq);
@@ -345,13 +347,17 @@ execlists_user_end(struct intel_engine_execlists *execlists)
 static inline void
 execlists_context_schedule_in(struct i915_request *rq)
 {
+       GEM_BUG_ON(rq->hw_context->active);
+
        execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
        intel_engine_context_in(rq->engine);
+       rq->hw_context->active = rq->engine;
 }
 
 static inline void
 execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
 {
+       rq->hw_context->active = NULL;
        intel_engine_context_out(rq->engine);
        execlists_context_status_change(rq, status);
        trace_i915_request_out(rq);
@@ -1079,6 +1085,28 @@ static void execlists_context_destroy(struct intel_context *ce)
 
 static void execlists_context_unpin(struct intel_context *ce)
 {
+       struct intel_engine_cs *engine;
+
+       /*
+        * The tasklet may still be using a pointer to our state, via an
+        * old request. However, since we know we only unpin the context
+        * on retirement of the following request, we know that the last
+        * request referencing us will have had a completion CS interrupt.
+        * If we see that it is still active, it means that the tasklet hasn't
+        * had the chance to run yet; let it run before we tear down the
+        * reference it may use.
+        */
+       engine = READ_ONCE(ce->active);
+       if (unlikely(engine)) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&engine->timeline.lock, flags);
+               process_csb(engine);
+               spin_unlock_irqrestore(&engine->timeline.lock, flags);
+
+               GEM_BUG_ON(READ_ONCE(ce->active));
+       }
+
        i915_gem_context_unpin_hw_id(ce->gem_context);
 
        intel_ring_unpin(ce->ring);
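
A usage note on the locking above (my reading of the code, not stated in the
patch itself): taking engine->timeline.lock around process_csb() serialises
the flush against the submission tasklet, which at this point in the driver's
history processed CSB events under that same lock. Once the lock is dropped,
every CS event referencing this context has been consumed, which is why the
GEM_BUG_ON can assert that ce->active has returned to NULL before the unpin
proceeds.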