* Similarly the preempt context must always be available so that
* we can interrupt the engine at any time.
*/
- if (INTEL_INFO(engine->i915)->has_logical_ring_preemption) {
+ if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
ring = engine->context_pin(engine,
engine->i915->preempt_context);
if (IS_ERR(ring)) {
err_breadcrumbs:
intel_engine_fini_breadcrumbs(engine);
err_unpin_preempt:
- if (INTEL_INFO(engine->i915)->has_logical_ring_preemption)
+ if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
engine->context_unpin(engine, engine->i915->preempt_context);
err_unpin_kernel:
engine->context_unpin(engine, engine->i915->kernel_context);
intel_engine_cleanup_cmd_parser(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
- if (INTEL_INFO(engine->i915)->has_logical_ring_preemption)
+ if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
engine->context_unpin(engine, engine->i915->preempt_context);
engine->context_unpin(engine, engine->i915->kernel_context);
}
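
The three INTEL_INFO() checks above all become the same feature macro.
For reference, a sketch of its likely definition, assuming it simply
wraps the open-coded check it replaces (the authoritative definition
lives in i915_drv.h):

#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
	(INTEL_INFO(dev_priv)->has_logical_ring_preemption)

Using the macro keeps the call sites grep-able and consistent with the
other HAS_*() feature checks in the driver.
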
assert_ring_tail_valid(rq->ring, rq->tail);
}
-static void unwind_incomplete_requests(struct intel_engine_cs *engine)
+static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *rq, *rn;
struct i915_priolist *uninitialized_var(p);
}
}
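
Two things worth noting in the rename above. First, the new leading
double underscore follows the usual kernel convention: the __ variant
expects the caller to already hold engine->timeline->lock, which the
locked wrapper added just below makes explicit. Second,
uninitialized_var(p) silences a false "may be used uninitialized"
warning; presumably p is only read once the (elided) loop body has
assigned it.
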
+static void
+execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
+{
+ struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
+
+ spin_lock_irq(&engine->timeline->lock);
+ __unwind_incomplete_requests(engine);
+ spin_unlock_irq(&engine->timeline->lock);
+}
+
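
The new wrapper recovers the engine from its embedded execlists state
via container_of(). A self-contained sketch of that idiom, with
hypothetical struct names standing in for the driver's types:

#include <stddef.h>

struct execlists_state { unsigned int active; };

struct engine {
	int id;
	struct execlists_state execlists; /* embedded by value */
};

/* Simplified; the kernel's container_of() adds type checking. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct engine *to_engine(struct execlists_state *el)
{
	/* Subtract the member offset to get back to the parent. */
	return container_of(el, struct engine, execlists);
}

This only works because intel_engine_execlists is embedded by value in
intel_engine_cs, so the pointer arithmetic is exact.
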
static inline void
execlists_context_status_change(struct drm_i915_gem_request *rq,
unsigned long status)
elsp_write(ce->lrc_desc, elsp);
}
-static bool can_preempt(struct intel_engine_cs *engine)
-{
- return INTEL_INFO(engine->i915)->has_logical_ring_preemption;
-}
-
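
With the feature macro available, the can_preempt() wrapper above is
redundant; its caller in execlists_dequeue() below now tests
HAS_LOGICAL_RING_PREEMPTION() directly.
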
static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
if (port_count(&port[0]) > 1)
goto unlock;
- if (can_preempt(engine) &&
+ if (HAS_LOGICAL_RING_PREEMPTION(engine->i915) &&
rb_entry(rb, struct i915_priolist, node)->priority >
max(last->priotree.priority, 0)) {
/*
}
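
The dequeue test above arms preemption only when the best queued
priority beats both the currently executing request and the default
priority (0). Restated as a standalone helper (illustrative only, not
driver code):

#include <stdbool.h>

/* Preempt iff the queued work outranks the running request, where
 * nothing at or below the default priority (0) ever forces a switch. */
static bool should_preempt(int queued_prio, int running_prio)
{
	int floor = running_prio > 0 ? running_prio : 0;

	return queued_prio > floor;
}

The max(..., 0) floor means a request with negative priority can never
trigger a preemption cycle on its own.
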
static void
-execlist_cancel_port_requests(struct intel_engine_execlists *execlists)
+execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
{
struct execlist_port *port = execlists->port;
unsigned int num_ports = execlists_num_ports(execlists);
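
The rename above is purely for namespace consistency: the helper joins
the other execlists_*() functions, and the parameter gains const to
document that the execlists pointer itself is never reassigned inside
the function.
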
spin_lock_irqsave(&engine->timeline->lock, flags);
/* Cancel the requests on the HW and clear the ELSP tracker. */
- execlist_cancel_port_requests(execlists);
+ execlists_cancel_port_requests(execlists);
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->timeline->requests, link) {
if (status & GEN8_CTX_STATUS_ACTIVE_IDLE &&
buf[2*head + 1] == PREEMPT_ID) {
- execlist_cancel_port_requests(execlists);
-
- spin_lock_irq(&engine->timeline->lock);
- unwind_incomplete_requests(engine);
- spin_unlock_irq(&engine->timeline->lock);
+ execlists_cancel_port_requests(execlists);
+ execlists_unwind_incomplete_requests(execlists);
GEM_BUG_ON(!execlists_is_active(execlists,
EXECLISTS_ACTIVE_PREEMPT));
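
execlists_is_active() is a bit test on execlists->active; a sketch of
the helper, assuming the definition in intel_ringbuffer.h has the
obvious shape (this is an assumption, not a quote):

static inline bool
execlists_is_active(const struct intel_engine_execlists *execlists,
		    unsigned int bit)
{
	return test_bit(bit, (unsigned long *)&execlists->active);
}

The GEM_BUG_ON documents the invariant that a preempt completion event
only arrives while EXECLISTS_ACTIVE_PREEMPT is set.
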
* guessing the missed context-switch events by looking at what
* requests were completed.
*/
- execlist_cancel_port_requests(execlists);
+ execlists_cancel_port_requests(execlists);
/* Push back any incomplete requests for replay after the reset. */
- unwind_incomplete_requests(engine);
+ __unwind_incomplete_requests(engine);
spin_unlock_irqrestore(&engine->timeline->lock, flags);
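
Note the asymmetry that motivates the __unwind split: this reset path
already holds engine->timeline->lock via spin_lock_irqsave(), so it
calls __unwind_incomplete_requests() directly, whereas the CSB handler
above uses the locked execlists_unwind_incomplete_requests() wrapper.
A self-contained sketch of that pattern, with illustrative names:

#include <pthread.h>

struct timeline { pthread_mutex_t lock; };

/* Caller must hold tl->lock (the reset path above). */
static void __unwind(struct timeline *tl)
{
	/* walk the timeline and requeue incomplete requests */
}

/* Takes the lock itself (the interrupt/CSB path). */
static void unwind(struct timeline *tl)
{
	pthread_mutex_lock(&tl->lock);
	__unwind(tl);
	pthread_mutex_unlock(&tl->lock);
}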