#define WA_TAIL_DWORDS 2
#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
-#define ACTIVE_PRIORITY (I915_PRIORITY_NOSEMAPHORE)
-
static int execlists_context_deferred_alloc(struct intel_context *ce,
					    struct intel_engine_cs *engine);
static void execlists_init_reg_state(u32 *reg_state,
	/*
	 * On unwinding the active request, we give it a priority bump
-	 * equivalent to a freshly submitted request. This protects it from
-	 * being gazumped again, but it would be preferable if we didn't
-	 * let it be gazumped in the first place!
-	 *
-	 * See __unwind_incomplete_requests()
+	 * if it has completed waiting on any semaphore. If we know that
+	 * the request has already started, we can prevent an unwanted
+	 * preempt-to-idle cycle by taking that into account now.
	 */
-	if (~prio & ACTIVE_PRIORITY && __i915_request_has_started(rq)) {
-		/*
-		 * After preemption, we insert the active request at the
-		 * end of the new priority level. This means that we will be
-		 * _lower_ priority than the preemptee all things equal (and
-		 * so the preemption is valid), so adjust our comparison
-		 * accordingly.
-		 */
-		prio |= ACTIVE_PRIORITY;
-		prio--;
-	}
+	if (__i915_request_has_started(rq))
+		prio |= I915_PRIORITY_NOSEMAPHORE;

	/* Restrict mere WAIT boosts from triggering preemption */
	return prio | __NO_PREEMPTION;
}
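
With the ACTIVE_PRIORITY dance gone, effective_prio() reduces to plain bit
composition: a request that has already started gains the NOSEMAPHORE bit,
and the WAIT bit is always set so that a mere WAIT boost can never justify
preemption. The stand-alone model below illustrates the arithmetic; the bit
positions and the NO_PREEMPTION alias are assumptions chosen to mirror the
i915 internal priority scheme, not the kernel's definitions.

/* Stand-alone sketch, not kernel code: bit values are illustrative. */
#include <stdio.h>

#define PRIORITY_WAIT		(1u << 0)	/* boost for explicit waiters */
#define PRIORITY_NOSEMAPHORE	(1u << 2)	/* not stuck on a semaphore */
#define NO_PREEMPTION		PRIORITY_WAIT	/* WAIT alone must not preempt */

static unsigned int model_effective_prio(unsigned int prio, int has_started)
{
	/*
	 * A request that has started has passed all its semaphore waits,
	 * so treat it as if it never waited on one.
	 */
	if (has_started)
		prio |= PRIORITY_NOSEMAPHORE;

	/*
	 * Setting WAIT on ourselves means a peer whose only advantage is
	 * the WAIT boost compares as equal, so it cannot preempt us.
	 */
	return prio | NO_PREEMPTION;
}

int main(void)
{
	printf("started:     %#x\n", model_effective_prio(0, 1)); /* 0x5 */
	printf("not started: %#x\n", model_effective_prio(0, 0)); /* 0x1 */
	return 0;
}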
static struct i915_request *
-__unwind_incomplete_requests(struct intel_engine_cs *engine, int boost)
+__unwind_incomplete_requests(struct intel_engine_cs *engine)
{
	struct i915_request *rq, *rn, *active = NULL;
	struct list_head *uninitialized_var(pl);
-	int prio = I915_PRIORITY_INVALID | boost;
+	int prio = I915_PRIORITY_INVALID;

	lockdep_assert_held(&engine->timeline.lock);
		active = rq;
	}
-	/*
-	 * The active request is now effectively the start of a new client
-	 * stream, so give it the equivalent small priority bump to prevent
-	 * it being gazumped a second time by another peer.
-	 *
-	 * Note we have to be careful not to apply a priority boost to a request
-	 * still spinning on its semaphores. If the request hasn't started, that
-	 * means it is still waiting for its dependencies to be signaled, and
-	 * if we apply a priority boost to this request, we will boost it past
-	 * its signalers and so break PI.
-	 *
-	 * One consequence of this preemption boost is that we may jump
-	 * over lesser priorities (such as I915_PRIORITY_WAIT), effectively
-	 * making those priorities non-preemptible. They will be moved forward
-	 * in the priority queue, but they will not gain immediate access to
-	 * the GPU.
-	 */
-	if (~prio & boost && __i915_request_has_started(active)) {
-		prio |= boost;
-		GEM_BUG_ON(active->sched.attr.priority >= prio);
-		active->sched.attr.priority = prio;
-		list_move_tail(&active->sched.link,
-			       i915_sched_lookup_priolist(engine, prio));
-	}
-
	return active;
}
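
The hunk above elides most of the loop body; what remains is the plain
unwind: walk the engine timeline from newest to oldest, stop at the first
completed request, and re-queue each incomplete request at its own priority,
with no boost applied any more. A minimal user-space model of that pattern
follows (the types and helper are invented for illustration, not kernel API):

/* Illustrative model of the unwind pattern; not kernel code. */
#include <stdio.h>

struct req {
	int prio;
	int completed;
};

/* Returns the index of the oldest incomplete request ("active"), or -1. */
static int model_unwind(const struct req *timeline, int n,
			int *queue, int *queued)
{
	int active = -1;
	int i;

	*queued = 0;
	for (i = n - 1; i >= 0; i--) {	/* newest first, like the kernel loop */
		if (timeline[i].completed)
			break;
		/* "Unsubmit" and push back for replay at its own priority. */
		queue[(*queued)++] = timeline[i].prio;
		active = i;
	}
	return active;
}

int main(void)
{
	const struct req timeline[] = {
		{ .prio = 2, .completed = 1 },	/* oldest, already finished */
		{ .prio = 1, .completed = 0 },
		{ .prio = 3, .completed = 0 },	/* newest */
	};
	int queue[3], queued;
	int active = model_unwind(timeline, 3, queue, &queued);

	printf("active index = %d, requeued = %d\n", active, queued);
	return 0;
}

This is also the point of dropping the boost parameter: an unwound request
can no longer leapfrog its signalers, so the priority-inheritance concern in
the removed comment simply disappears.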
	struct intel_engine_cs *engine =
		container_of(execlists, typeof(*engine), execlists);

-	return __unwind_incomplete_requests(engine, 0);
+	return __unwind_incomplete_requests(engine);
}
static inline void
	execlists_cancel_port_requests(execlists);

	__unwind_incomplete_requests(container_of(execlists,
						  struct intel_engine_cs,
-						  execlists),
-				     ACTIVE_PRIORITY);
+						  execlists));
}
static void execlists_dequeue(struct intel_engine_cs *engine)
	execlists_cancel_port_requests(execlists);

	/* Push back any incomplete requests for replay after the reset. */
-	rq = __unwind_incomplete_requests(engine, 0);
+	rq = __unwind_incomplete_requests(engine);
	if (!rq)
		goto out_replay;
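
At the reset site, only the return value matters: the oldest incomplete
request is the point from which replay starts, and if nothing was unwound
the engine had nothing in flight. A hedged sketch of that guard follows;
the types and helper are hypothetical stand-ins, and out_replay refers to
surrounding code not shown in this excerpt.

/* Illustrative only; not kernel code. */
#include <stdio.h>

struct request { int fence; };

/* Stand-in for __unwind_incomplete_requests(): oldest incomplete or NULL. */
static struct request *model_unwind(struct request *oldest_incomplete)
{
	return oldest_incomplete;
}

static void model_reset(struct request *oldest_incomplete)
{
	struct request *rq = model_unwind(oldest_incomplete);

	if (!rq) {
		/* "out_replay" path: idle engine, restart from a clean image */
		printf("replay: clean context\n");
		return;
	}
	printf("replay: from fence %d\n", rq->fence);
}

int main(void)
{
	struct request rq = { .fence = 42 };

	model_reset(NULL);	/* engine was idle across the reset */
	model_reset(&rq);	/* replay the unfinished work */
	return 0;
}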