drm/i915/execlists: Suppress redundant preemption
authorChris Wilson <chris@chris-wilson.co.uk>
Fri, 1 Mar 2019 17:08:58 +0000 (17:08 +0000)
committerChris Wilson <chris@chris-wilson.co.uk>
Fri, 1 Mar 2019 17:40:32 +0000 (17:40 +0000)
On unwinding the active request, we give it a small boost (limited to the
internal priority levels) to prevent it from being gazumped a second time.
However, this means it can be promoted above the request that triggered the
preemption, causing a preempt-to-idle cycle for no net change. We can avoid
this by taking the boost into account when checking whether the preemption
request is valid.
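
A minimal sketch of that check, with hypothetical helper names and a stand-in
boost bit rather than the driver's actual definitions: preemption is only
treated as valid if the top of the queue would still outrank the running
request after the unwind boost that request would receive.

  /* Sketch only: stand-in for the unwind (ACTIVE_PRIORITY) boost bit. */
  #define SKETCH_ACTIVE_BOOST (1 << 1)

  /* Priority the running request would hold once unwound. */
  static int sketch_effective_prio(int prio, int has_started)
  {
          /* Fold in the unwind boost, minus one for insertion at the tail. */
          if (has_started && !(prio & SKETCH_ACTIVE_BOOST))
                  prio = (prio | SKETCH_ACTIVE_BOOST) - 1;
          return prio;
  }

  /* Preempt only if the queue still wins after the boost is accounted for. */
  static int sketch_preemption_valid(int queue_prio, int active_prio, int started)
  {
          return queue_prio > sketch_effective_prio(active_prio, started);
  }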

v2: After preemption, the active request will be ordered after the preemptee
if they end up with equal priority.

v3: Tvrtko pointed out that this, the existing logic, makes
I915_PRIORITY_WAIT non-preemptible. Document this interesting quirk!

v4: Prove Tvrtko was right about WAIT being non-preemptible and test it.
v5: Except not all priorities were made equal: WAIT only fails to preempt
if we start off as !NEWCLIENT.
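
A small worked example of that quirk, using assumed bit values (WAIT as bit 0,
the new-client boost as bit 1; hypothetical, not the real I915_PRIORITY_*
encoding) and a user priority of 0 throughout: the unwound active request is
treated as (0 | NEWCLIENT) - 1, so a mere WAIT boost never compares strictly
higher and cannot preempt it, while a genuine new client still can.

  #include <stdio.h>

  int main(void)
  {
          /* Assumed bit values for the sketch, not i915's real defines. */
          const int WAIT = 1 << 0;       /* stand-in for I915_PRIORITY_WAIT */
          const int NEWCLIENT = 1 << 1;  /* stand-in for ACTIVE_PRIORITY */

          /* Running request: user priority 0, started as !NEWCLIENT. */
          int active = 0;
          /* Effective priority once the unwind boost is folded in. */
          int effective = (active | NEWCLIENT) - 1;

          /* A queued request holding a mere WAIT boost cannot outrank it... */
          printf("WAIT preempts?      %s\n", WAIT > effective ? "yes" : "no");
          /* ...but a freshly submitted client (NEWCLIENT) still can. */
          printf("NEWCLIENT preempts? %s\n", NEWCLIENT > effective ? "yes" : "no");
          return 0;
  }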

v6: More commentary after coming to an understanding about what I had
forgotten to say.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190301170901.8340-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/intel_lrc.c

index 4f2187aa44e49b15968814297873923d0ee7eb57..3fd0c45a29208f64a90ba1cd9302cab2c44171cd 100644 (file)
 #define WA_TAIL_DWORDS 2
 #define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
 
+#define ACTIVE_PRIORITY (I915_PRIORITY_NEWCLIENT)
+
 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
                                            struct intel_engine_cs *engine,
                                            struct intel_context *ce);
@@ -190,8 +192,30 @@ static inline int rq_prio(const struct i915_request *rq)
 
 static int effective_prio(const struct i915_request *rq)
 {
+       int prio = rq_prio(rq);
+
+       /*
+        * On unwinding the active request, we give it a priority bump
+        * equivalent to a freshly submitted request. This protects it from
+        * being gazumped again, but it would be preferable if we didn't
+        * let it be gazumped in the first place!
+        *
+        * See __unwind_incomplete_requests()
+        */
+       if (~prio & ACTIVE_PRIORITY && __i915_request_has_started(rq)) {
+               /*
+                * After preemption, we insert the active request at the
+                * end of the new priority level. This means that we will be
+                * _lower_ priority than the preemptee all things equal (and
+                * so the preemption is valid), so adjust our comparison
+                * accordingly.
+                */
+               prio |= ACTIVE_PRIORITY;
+               prio--;
+       }
+
        /* Restrict mere WAIT boosts from triggering preemption */
-       return rq_prio(rq) | __NO_PREEMPTION;
+       return prio | __NO_PREEMPTION;
 }
 
 static int queue_prio(const struct intel_engine_execlists *execlists)
@@ -359,7 +383,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
 {
        struct i915_request *rq, *rn, *active = NULL;
        struct list_head *uninitialized_var(pl);
-       int prio = I915_PRIORITY_INVALID | I915_PRIORITY_NEWCLIENT;
+       int prio = I915_PRIORITY_INVALID | ACTIVE_PRIORITY;
 
        lockdep_assert_held(&engine->timeline.lock);
 
@@ -390,9 +414,21 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
         * The active request is now effectively the start of a new client
         * stream, so give it the equivalent small priority bump to prevent
         * it being gazumped a second time by another peer.
+        *
+        * Note we have to be careful not to apply a priority boost to a request
+        * still spinning on its semaphores. If the request hasn't started, that
+        * means it is still waiting for its dependencies to be signaled, and
+        * if we apply a priority boost to this request, we will boost it past
+        * its signalers and so break PI.
+        *
+        * One consequence of this preemption boost is that we may jump
+        * over lesser priorities (such as I915_PRIORITY_WAIT), effectively
+        * making those priorities non-preemptible. They will be moved forward
+        * in the priority queue, but they will not gain immediate access to
+        * the GPU.
         */
-       if (!(prio & I915_PRIORITY_NEWCLIENT)) {
-               prio |= I915_PRIORITY_NEWCLIENT;
+       if (~prio & ACTIVE_PRIORITY && __i915_request_has_started(active)) {
+               prio |= ACTIVE_PRIORITY;
                active->sched.attr.priority = prio;
                list_move_tail(&active->sched.link,
                               i915_sched_lookup_priolist(engine, prio));