drm/i915/execlists: Keep request->priority for its lifetime
authorChris Wilson <chris@chris-wilson.co.uk>
Tue, 3 Oct 2017 20:34:50 +0000 (21:34 +0100)
committerChris Wilson <chris@chris-wilson.co.uk>
Wed, 4 Oct 2017 16:52:46 +0000 (17:52 +0100)
With preemption, we will want to "unsubmit" a request, taking it back
from the hw and returning it to the priority sorted execution list. In
order to know where to insert it into that list, we need to remember
its adjusted priority (which may change even while it is being executed).
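
As an illustration of why the priority must now survive submission, here
is a minimal sketch of an unsubmit path; the helper names and exact flow
are assumptions for this series, not part of this patch:

    /*
     * Sketch: take incomplete requests back from the hw and reinsert
     * them into the priority sorted execution list. This only works
     * because rq->priotree.priority still holds the adjusted value
     * rather than the old INT_MAX submit-time sentinel.
     */
    static void unwind_incomplete_requests(struct intel_engine_cs *engine)
    {
            struct drm_i915_gem_request *rq, *rn;

            lockdep_assert_held(&engine->timeline->lock);

            list_for_each_entry_safe_reverse(rq, rn,
                                             &engine->timeline->requests,
                                             link) {
                    if (i915_gem_request_completed(rq))
                            break;

                    __i915_gem_request_unsubmit(rq);
                    insert_request(engine, &rq->priotree,
                                   rq->priotree.priority);
            }
    }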

This also affects reset for execlists as we are now unsubmitting the
requests following the reset (rather than directly writing the ELSP for
the inflight contexts). This turns reset into an accidental preemption
point, as after the reset we may choose a different pair of contexts to
submit to hw.
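
Schematically (names assumed; reference accounting elided for brevity),
the reset path becomes:

    /*
     * Sketch of reset-as-preemption: return the inflight requests to
     * the execution list and clear the ports, so the next dequeue is
     * free to pick a different pair of contexts to submit.
     */
    static void reset_engine_sketch(struct intel_engine_cs *engine)
    {
            struct intel_engine_execlists * const execlists =
                    &engine->execlists;

            unwind_incomplete_requests(engine); /* see sketch above */

            /* drop the stale ELSP contents; the tasklet resubmits */
            memset(execlists->port, 0, sizeof(execlists->port));
    }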

GuC is not updated as this series doesn't add preemption to the GuC
submission, and so it can keep benefiting from the early pruning of the
DFS inside execlists_schedule() for a little longer. We also need to
find a way of reducing the cost of that DFS...
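
For reference, the pruning that GuC submission keeps relying on is the
sentinel test in the execlists_schedule() DFS (visible in the last hunk
below): a signaler still stamped INT_MAX from submit can never satisfy
the bump check, so its dependency subtree is skipped entirely.

    /* from execlists_schedule(): an already-submitted signaler has
     * priority == INT_MAX, the test is false, and the DFS stops there
     */
    if (prio > READ_ONCE(p->signaler->priority))
            list_move_tail(&p->dfs_link, &dfs);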

v2: Include priority in error-state

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171003203453.15692-6-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/intel_lrc.c

index 31292afb961d7bf83cf67c51202ac7e264e1195b..1fc7080bfa7b8867b36c8e2156ec40b8d3e2095b 100644 (file)
@@ -982,6 +982,7 @@ struct i915_gpu_state {
                        pid_t pid;
                        u32 handle;
                        u32 hw_id;
+                       int priority;
                        int ban_score;
                        int active;
                        int guilty;
@@ -1004,6 +1005,7 @@ struct i915_gpu_state {
                        long jiffies;
                        pid_t pid;
                        u32 context;
+                       int priority;
                        int ban_score;
                        u32 seqno;
                        u32 head;
index c14552ab270bc34a8d67936e74b7a0b72e05b2e9..dc91b32d699ee83ef0cb087551d40d7078b8fd06 100644 (file)
@@ -377,9 +377,9 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
        if (!erq->seqno)
                return;
 
-       err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n",
+       err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms ago, head %08x, tail %08x\n",
                   prefix, erq->pid, erq->ban_score,
-                  erq->context, erq->seqno,
+                  erq->context, erq->seqno, erq->priority,
                   jiffies_to_msecs(jiffies - erq->jiffies),
                   erq->head, erq->tail);
 }
@@ -388,9 +388,9 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
                                const char *header,
                                const struct drm_i915_error_context *ctx)
 {
-       err_printf(m, "%s%s[%d] user_handle %d hw_id %d, ban score %d guilty %d active %d\n",
+       err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d guilty %d active %d\n",
                   header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
-                  ctx->ban_score, ctx->guilty, ctx->active);
+                  ctx->priority, ctx->ban_score, ctx->guilty, ctx->active);
 }
 
 static void error_print_engine(struct drm_i915_error_state_buf *m,
@@ -1271,6 +1271,7 @@ static void record_request(struct drm_i915_gem_request *request,
                           struct drm_i915_error_request *erq)
 {
        erq->context = request->ctx->hw_id;
+       erq->priority = request->priotree.priority;
        erq->ban_score = atomic_read(&request->ctx->ban_score);
        erq->seqno = request->global_seqno;
        erq->jiffies = request->emitted_jiffies;
@@ -1364,6 +1365,7 @@ static void record_context(struct drm_i915_error_context *e,
 
        e->handle = ctx->user_handle;
        e->hw_id = ctx->hw_id;
+       e->priority = ctx->priority;
        e->ban_score = atomic_read(&ctx->ban_score);
        e->guilty = atomic_read(&ctx->guilty_count);
        e->active = atomic_read(&ctx->active_count);
index 1703bf6c98d617e7a35b872f6874b266c4c76836..5821762d90078d6b6f5f53f85928ff4a7d38dc15 100644 (file)
@@ -584,8 +584,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                        }
 
                        INIT_LIST_HEAD(&rq->priotree.link);
-                       rq->priotree.priority = INT_MAX;
-
                        __i915_gem_request_submit(rq);
                        trace_i915_gem_request_in(rq, port_index(port, execlists));
                        last = rq;
@@ -793,6 +791,7 @@ static void intel_lrc_irq_handler(unsigned long data)
                                execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
 
                                trace_i915_gem_request_out(rq);
+                               rq->priotree.priority = INT_MAX;
                                i915_gem_request_put(rq);
 
                                execlists_port_complete(execlists, port);
@@ -845,11 +844,15 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
        spin_unlock_irqrestore(&engine->timeline->lock, flags);
 }
 
+static struct drm_i915_gem_request *pt_to_request(struct i915_priotree *pt)
+{
+       return container_of(pt, struct drm_i915_gem_request, priotree);
+}
+
 static struct intel_engine_cs *
 pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
 {
-       struct intel_engine_cs *engine =
-               container_of(pt, struct drm_i915_gem_request, priotree)->engine;
+       struct intel_engine_cs *engine = pt_to_request(pt)->engine;
 
        GEM_BUG_ON(!locked);
 
@@ -905,6 +908,9 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
                 * engines.
                 */
                list_for_each_entry(p, &pt->signalers_list, signal_link) {
+                       if (i915_gem_request_completed(pt_to_request(p->signaler)))
+                               continue;
+
                        GEM_BUG_ON(p->signaler->priority < pt->priority);
                        if (prio > READ_ONCE(p->signaler->priority))
                                list_move_tail(&p->dfs_link, &dfs);