drm/i915: Pass around the intel_context
Author: Chris Wilson <chris@chris-wilson.co.uk>
Wed, 6 Mar 2019 08:47:04 +0000 (08:47 +0000)
Committer: Chris Wilson <chris@chris-wilson.co.uk>
Wed, 6 Mar 2019 10:16:33 +0000 (10:16 +0000)
Instead of passing the gem_context and engine to find the instance of
the intel_context to use, pass around the intel_context instead. This is
useful for the next few patches, where the intel_context is no longer a
direct lookup.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190306084704.15755-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/intel_lrc.c

index ff039750069d796fb18d12964cfe0728215f18ab..3b8e354ec8b31ff2a474b16b943229e4b9ddd36a 100644 (file)
@@ -3112,7 +3112,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
 int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file);
 void i915_oa_init_reg_state(struct intel_engine_cs *engine,
-                           struct i915_gem_context *ctx,
+                           struct intel_context *ce,
                            u32 *reg_state);
 
 /* i915_gem_evict.c */
index e5aea176c1daef49d4e2eda4fb6a47aa74d23639..a0d145f976ec2fe4e055c3d489a0782a57ea1501 100644 (file)
@@ -1629,13 +1629,14 @@ static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
  * It's fine to put out-of-date values into these per-context registers
  * in the case that the OA unit has been disabled.
  */
-static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
-                                          u32 *reg_state,
-                                          const struct i915_oa_config *oa_config)
+static void
+gen8_update_reg_state_unlocked(struct intel_context *ce,
+                              u32 *reg_state,
+                              const struct i915_oa_config *oa_config)
 {
-       struct drm_i915_private *dev_priv = ctx->i915;
-       u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
-       u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
+       struct drm_i915_private *i915 = ce->gem_context->i915;
+       u32 ctx_oactxctrl = i915->perf.oa.ctx_oactxctrl_offset;
+       u32 ctx_flexeu0 = i915->perf.oa.ctx_flexeu0_offset;
        /* The MMIO offsets for Flex EU registers aren't contiguous */
        i915_reg_t flex_regs[] = {
                EU_PERF_CNTL0,
@@ -1649,8 +1650,8 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
        int i;
 
        CTX_REG(reg_state, ctx_oactxctrl, GEN8_OACTXCONTROL,
-               (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
-               (dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+               (i915->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+               (i915->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
                GEN8_OA_COUNTER_RESUME);
 
        for (i = 0; i < ARRAY_SIZE(flex_regs); i++) {
@@ -1678,10 +1679,9 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
                CTX_REG(reg_state, state_offset, flex_regs[i], value);
        }
 
-       CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
-               gen8_make_rpcs(dev_priv,
-                              &to_intel_context(ctx,
-                                                dev_priv->engine[RCS0])->sseu));
+       CTX_REG(reg_state,
+               CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
+               gen8_make_rpcs(i915, &ce->sseu));
 }
 
 /*
@@ -1754,7 +1754,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
                ce->state->obj->mm.dirty = true;
                regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
 
-               gen8_update_reg_state_unlocked(ctx, regs, oa_config);
+               gen8_update_reg_state_unlocked(ce, regs, oa_config);
 
                i915_gem_object_unpin_map(ce->state->obj);
        }
@@ -2138,8 +2138,8 @@ err_config:
 }
 
 void i915_oa_init_reg_state(struct intel_engine_cs *engine,
-                           struct i915_gem_context *ctx,
-                           u32 *reg_state)
+                           struct intel_context *ce,
+                           u32 *regs)
 {
        struct i915_perf_stream *stream;
 
@@ -2148,7 +2148,7 @@ void i915_oa_init_reg_state(struct intel_engine_cs *engine,
 
        stream = engine->i915->perf.oa.exclusive_stream;
        if (stream)
-               gen8_update_reg_state_unlocked(ctx, reg_state, stream->oa_config);
+               gen8_update_reg_state_unlocked(ce, regs, stream->oa_config);
 }
 
 /**
index 6ec6c4e175a23094544bc3d9909a78f4932f5159..f0ba20f2b41d5e451714120cd2c8f8ce08b6817c 100644 (file)
@@ -170,7 +170,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
                                            struct intel_engine_cs *engine,
                                            struct intel_context *ce);
 static void execlists_init_reg_state(u32 *reg_state,
-                                    struct i915_gem_context *ctx,
+                                    struct intel_context *ce,
                                     struct intel_engine_cs *engine,
                                     struct intel_ring *ring);
 
@@ -1320,8 +1320,8 @@ __execlists_update_reg_state(struct intel_engine_cs *engine,
 
        /* RPCS */
        if (engine->class == RENDER_CLASS)
-               regs[CTX_R_PWR_CLK_STATE + 1] = gen8_make_rpcs(engine->i915,
-                                                              &ce->sseu);
+               regs[CTX_R_PWR_CLK_STATE + 1] =
+                       gen8_make_rpcs(engine->i915, &ce->sseu);
 }
 
 static struct intel_context *
@@ -2021,7 +2021,7 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
        rq->ring->head = intel_ring_wrap(rq->ring, rq->head);
        intel_ring_update_space(rq->ring);
 
-       execlists_init_reg_state(regs, rq->gem_context, engine, rq->ring);
+       execlists_init_reg_state(regs, rq->hw_context, engine, rq->ring);
        __execlists_update_reg_state(engine, rq->hw_context);
 
 out_unlock:
@@ -2659,13 +2659,13 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
 }
 
 static void execlists_init_reg_state(u32 *regs,
-                                    struct i915_gem_context *ctx,
+                                    struct intel_context *ce,
                                     struct intel_engine_cs *engine,
                                     struct intel_ring *ring)
 {
-       struct drm_i915_private *dev_priv = engine->i915;
-       u32 base = engine->mmio_base;
+       struct i915_hw_ppgtt *ppgtt = ce->gem_context->ppgtt;
        bool rcs = engine->class == RENDER_CLASS;
+       u32 base = engine->mmio_base;
 
        /* A context is actually a big batch buffer with several
         * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
@@ -2680,7 +2680,7 @@ static void execlists_init_reg_state(u32 *regs,
        CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(engine),
                _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
                _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH));
-       if (INTEL_GEN(dev_priv) < 11) {
+       if (INTEL_GEN(engine->i915) < 11) {
                regs[CTX_CONTEXT_CONTROL + 1] |=
                        _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
                                            CTX_CTRL_RS_CTX_ENABLE);
@@ -2735,33 +2735,33 @@ static void execlists_init_reg_state(u32 *regs,
        CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
        CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
 
-       if (i915_vm_is_48bit(&ctx->ppgtt->vm)) {
+       if (i915_vm_is_48bit(&ppgtt->vm)) {
                /* 64b PPGTT (48bit canonical)
                 * PDP0_DESCRIPTOR contains the base address to PML4 and
                 * other PDP Descriptors are ignored.
                 */
-               ASSIGN_CTX_PML4(ctx->ppgtt, regs);
+               ASSIGN_CTX_PML4(ppgtt, regs);
        } else {
-               ASSIGN_CTX_PDP(ctx->ppgtt, regs, 3);
-               ASSIGN_CTX_PDP(ctx->ppgtt, regs, 2);
-               ASSIGN_CTX_PDP(ctx->ppgtt, regs, 1);
-               ASSIGN_CTX_PDP(ctx->ppgtt, regs, 0);
+               ASSIGN_CTX_PDP(ppgtt, regs, 3);
+               ASSIGN_CTX_PDP(ppgtt, regs, 2);
+               ASSIGN_CTX_PDP(ppgtt, regs, 1);
+               ASSIGN_CTX_PDP(ppgtt, regs, 0);
        }
 
        if (rcs) {
                regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
                CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0);
 
-               i915_oa_init_reg_state(engine, ctx, regs);
+               i915_oa_init_reg_state(engine, ce, regs);
        }
 
        regs[CTX_END] = MI_BATCH_BUFFER_END;
-       if (INTEL_GEN(dev_priv) >= 10)
+       if (INTEL_GEN(engine->i915) >= 10)
                regs[CTX_END] |= BIT(0);
 }
 
 static int
-populate_lr_context(struct i915_gem_context *ctx,
+populate_lr_context(struct intel_context *ce,
                    struct drm_i915_gem_object *ctx_obj,
                    struct intel_engine_cs *engine,
                    struct intel_ring *ring)
@@ -2807,11 +2807,12 @@ populate_lr_context(struct i915_gem_context *ctx,
        /* The second page of the context object contains some fields which must
         * be set up prior to the first execution. */
        regs = vaddr + LRC_STATE_PN * PAGE_SIZE;
-       execlists_init_reg_state(regs, ctx, engine, ring);
+       execlists_init_reg_state(regs, ce, engine, ring);
        if (!engine->default_state)
                regs[CTX_CONTEXT_CONTROL + 1] |=
                        _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
-       if (ctx == ctx->i915->preempt_context && INTEL_GEN(engine->i915) < 11)
+       if (ce->gem_context == engine->i915->preempt_context &&
+           INTEL_GEN(engine->i915) < 11)
                regs[CTX_CONTEXT_CONTROL + 1] |=
                        _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
                                           CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
@@ -2866,7 +2867,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
                goto error_deref_obj;
        }
 
-       ret = populate_lr_context(ctx, ctx_obj, engine, ring);
+       ret = populate_lr_context(ce, ctx_obj, engine, ring);
        if (ret) {
                DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
                goto error_ring_free;