drm/i915/gvt: Enable gfx virtualization for CFL
author	fred gao <fred.gao@intel.com>
Wed, 9 Jan 2019 01:20:07 +0000 (09:20 +0800)
committer	Zhenyu Wang <zhenyuw@linux.intel.com>
Thu, 10 Jan 2019 03:36:43 +0000 (11:36 +0800)
Enable graphics virtualization for Coffee Lake (CFL) by adding CFL to the existing SKL/KBL platform checks, and use INTEL_GEN to simplify the checks that apply to all SKL+ platforms.

v2:
- split the enabling code out into a final patch to make it easier to identify any regression.
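
The two check styles touched by this patch are illustrated by the minimal, self-contained C sketch below. The struct and macros are hypothetical stand-ins for the i915 helpers (INTEL_GEN(), IS_SKYLAKE(), ...), not the kernel definitions; they are only reduced here so the before/after pattern can be compiled and run on its own.

#include <stdbool.h>
#include <stdio.h>

struct fake_dev_priv {
	int gen;                           /* 8 = BDW, 9 = SKL/KBL/BXT/CFL here */
	bool is_skl, is_kbl, is_bxt, is_cfl;
};

#define INTEL_GEN(p)     ((p)->gen)
#define IS_SKYLAKE(p)    ((p)->is_skl)
#define IS_KABYLAKE(p)   ((p)->is_kbl)
#define IS_BROXTON(p)    ((p)->is_bxt)
#define IS_COFFEELAKE(p) ((p)->is_cfl)

/* Old style: every new gen9 platform must be added to each check by hand. */
static bool gen9_check_old(const struct fake_dev_priv *p)
{
	return IS_SKYLAKE(p) || IS_KABYLAKE(p) || IS_BROXTON(p);
}

/* New style: one generation test covers SKL/KBL/BXT/CFL and later gen9+. */
static bool gen9_check_new(const struct fake_dev_priv *p)
{
	return INTEL_GEN(p) >= 9;
}

/* Where behaviour really is per-platform, CFL is added to the explicit list. */
static bool skip_mocs_switch(const struct fake_dev_priv *p)
{
	return IS_KABYLAKE(p) || IS_BROXTON(p) || IS_COFFEELAKE(p);
}

int main(void)
{
	const struct fake_dev_priv cfl = { .gen = 9, .is_cfl = true };

	/* CFL is missed by the old per-platform list but caught by the gen test. */
	printf("old: %d  new: %d  skip_mocs: %d\n",
	       gen9_check_old(&cfl), gen9_check_new(&cfl), skip_mocs_switch(&cfl));
	return 0;
}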

Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
Reviewed-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Signed-off-by: Fei Jiang <fei.jiang@intel.com>
Signed-off-by: fred gao <fred.gao@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/display.c
drivers/gpu/drm/i915/gvt/dmabuf.c
drivers/gpu/drm/i915/gvt/fb_decoder.c
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/interrupt.c
drivers/gpu/drm/i915/gvt/mmio_context.c
drivers/gpu/drm/i915/gvt/scheduler.c

index cae00e6debafc7f2108c5dea2afba5d3c649b4c5..a04e8aa58547df109be0167af26489db32953c9a 100644 (file)
@@ -901,7 +901,8 @@ static int cmd_reg_handler(struct parser_exec_state *s,
         * It's good enough to support initializing mmio by lri command in
         * vgpu inhibit context on KBL.
         */
-       if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) &&
+       if ((IS_KABYLAKE(s->vgpu->gvt->dev_priv)
+               || IS_COFFEELAKE(s->vgpu->gvt->dev_priv)) &&
                        intel_gvt_mmio_is_in_ctx(gvt, offset) &&
                        !strncmp(cmd, "lri", 3)) {
                intel_gvt_hypervisor_read_gpa(s->vgpu,
@@ -1280,9 +1281,7 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
        if (!info->async_flip)
                return 0;
 
-       if (IS_SKYLAKE(dev_priv)
-               || IS_KABYLAKE(dev_priv)
-               || IS_BROXTON(dev_priv)) {
+       if (INTEL_GEN(dev_priv) >= 9) {
                stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
                tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
                                GENMASK(12, 10)) >> 10;
@@ -1310,9 +1309,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
 
        set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
                      info->surf_val << 12);
-       if (IS_SKYLAKE(dev_priv)
-               || IS_KABYLAKE(dev_priv)
-               || IS_BROXTON(dev_priv)) {
+       if (INTEL_GEN(dev_priv) >= 9) {
                set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
                              info->stride_val);
                set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
@@ -1336,9 +1333,7 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
 
        if (IS_BROADWELL(dev_priv))
                return gen8_decode_mi_display_flip(s, info);
-       if (IS_SKYLAKE(dev_priv)
-               || IS_KABYLAKE(dev_priv)
-               || IS_BROXTON(dev_priv))
+       if (INTEL_GEN(dev_priv) >= 9)
                return skl_decode_mi_display_flip(s, info);
 
        return -ENODEV;
index df1e14145747ca9666dae9c8bdf7273cc69c4b46..4f25b6b7728ef32e181f3e89627c92d2d78d2033 100644 (file)
@@ -198,7 +198,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                        SDE_PORTC_HOTPLUG_CPT |
                        SDE_PORTD_HOTPLUG_CPT);
 
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+           IS_COFFEELAKE(dev_priv)) {
                vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
                                SDE_PORTE_HOTPLUG_SPT);
                vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |=
@@ -273,7 +274,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
        }
 
-       if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+       if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+            IS_COFFEELAKE(dev_priv)) &&
                        intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
                vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
        }
@@ -453,7 +455,8 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
 {
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+           IS_COFFEELAKE(dev_priv))
                clean_virtual_dp_monitor(vgpu, PORT_D);
        else
                clean_virtual_dp_monitor(vgpu, PORT_B);
@@ -476,7 +479,8 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
 
        intel_vgpu_init_i2c_edid(vgpu);
 
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+           IS_COFFEELAKE(dev_priv))
                return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D,
                                                resolution);
        else
index 2eb681175faecb6e0380c8c2b0b5ad584c8d6a91..3e7e2b80c8579017cecdda478bc6166e1f46e061 100644 (file)
@@ -163,9 +163,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
 
        obj->read_domains = I915_GEM_DOMAIN_GTT;
        obj->write_domain = 0;
-       if (IS_SKYLAKE(dev_priv)
-               || IS_KABYLAKE(dev_priv)
-               || IS_BROXTON(dev_priv)) {
+       if (INTEL_GEN(dev_priv) >= 9) {
                unsigned int tiling_mode = 0;
                unsigned int stride = 0;
 
index 481896fb712abf4c178b28af2f224a369d0aefd9..dbd91ef28886368982382d63b5c09ca7858643a7 100644 (file)
@@ -151,9 +151,7 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
        u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
        u32 stride = stride_reg;
 
-       if (IS_SKYLAKE(dev_priv)
-               || IS_KABYLAKE(dev_priv)
-               || IS_BROXTON(dev_priv)) {
+       if (INTEL_GEN(dev_priv) >= 9) {
                switch (tiled) {
                case PLANE_CTL_TILED_LINEAR:
                        stride = stride_reg * 64;
@@ -217,9 +215,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
        if (!plane->enabled)
                return -ENODEV;
 
-       if (IS_SKYLAKE(dev_priv)
-               || IS_KABYLAKE(dev_priv)
-               || IS_BROXTON(dev_priv)) {
+       if (INTEL_GEN(dev_priv) >= 9) {
                plane->tiled = val & PLANE_CTL_TILED_MASK;
                fmt = skl_format_to_drm(
                        val & PLANE_CTL_FORMAT_MASK,
@@ -260,9 +256,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
        }
 
        plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled,
-               (IS_SKYLAKE(dev_priv)
-               || IS_KABYLAKE(dev_priv)
-               || IS_BROXTON(dev_priv)) ?
+               (INTEL_GEN(dev_priv) >= 9) ?
                        (_PRI_PLANE_STRIDE_MASK >> 6) :
                                _PRI_PLANE_STRIDE_MASK, plane->bpp);
 
index 9910ba16d815036ad9ac5d2a849f704c5a1b2214..68a62ba5bf54cc84035556490d134f5e51794b9a 100644 (file)
@@ -283,9 +283,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
        old = vgpu_vreg(vgpu, offset);
        new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
 
-       if (IS_SKYLAKE(vgpu->gvt->dev_priv)
-               || IS_KABYLAKE(vgpu->gvt->dev_priv)
-               || IS_BROXTON(vgpu->gvt->dev_priv)) {
+       if (INTEL_GEN(vgpu->gvt->dev_priv) >= 9) {
                switch (offset) {
                case FORCEWAKE_RENDER_GEN9_REG:
                        ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -891,9 +889,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
        write_vreg(vgpu, offset, p_data, bytes);
        data = vgpu_vreg(vgpu, offset);
 
-       if ((IS_SKYLAKE(vgpu->gvt->dev_priv)
-               || IS_KABYLAKE(vgpu->gvt->dev_priv)
-               || IS_BROXTON(vgpu->gvt->dev_priv))
+       if ((INTEL_GEN(vgpu->gvt->dev_priv) >= 9)
                && offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
                /* SKL DPB/C/D aux ctl register changed */
                return 0;
@@ -1409,7 +1405,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
        switch (cmd) {
        case GEN9_PCODE_READ_MEM_LATENCY:
                if (IS_SKYLAKE(vgpu->gvt->dev_priv)
-                        || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
+                        || IS_KABYLAKE(vgpu->gvt->dev_priv)
+                        || IS_COFFEELAKE(vgpu->gvt->dev_priv)) {
                        /**
                         * "Read memory latency" command on gen9.
                         * Below memory latency values are read
@@ -1433,7 +1430,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
                break;
        case SKL_PCODE_CDCLK_CONTROL:
                if (IS_SKYLAKE(vgpu->gvt->dev_priv)
-                        || IS_KABYLAKE(vgpu->gvt->dev_priv))
+                        || IS_KABYLAKE(vgpu->gvt->dev_priv)
+                        || IS_COFFEELAKE(vgpu->gvt->dev_priv))
                        *data0 = SKL_CDCLK_READY_FOR_CHANGE;
                break;
        case GEN6_PCODE_READ_RC6VIDS:
@@ -3304,7 +3302,8 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
                if (ret)
                        goto err;
        } else if (IS_SKYLAKE(dev_priv)
-               || IS_KABYLAKE(dev_priv)) {
+               || IS_KABYLAKE(dev_priv)
+               || IS_COFFEELAKE(dev_priv)) {
                ret = init_broadwell_mmio_info(gvt);
                if (ret)
                        goto err;
index 6b9d1354ff29be770a68f75d1f5506070c45496d..67125c5eec6eb59c7502065ae2c4a3698b390822 100644 (file)
@@ -581,9 +581,7 @@ static void gen8_init_irq(
 
                SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
                SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
-       } else if (IS_SKYLAKE(gvt->dev_priv)
-                       || IS_KABYLAKE(gvt->dev_priv)
-                       || IS_BROXTON(gvt->dev_priv)) {
+       } else if (INTEL_GEN(gvt->dev_priv) >= 9) {
                SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
                SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
                SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
index 36a5147cd01e5224b2c6563c29128d05688e7fba..893de7267b1f7118074430fc96bf20b9f00f9530 100644 (file)
@@ -351,8 +351,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
         */
        fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
                                            FW_REG_READ | FW_REG_WRITE);
-       if (ring_id == RCS && (IS_SKYLAKE(dev_priv) ||
-                       IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)))
+       if (ring_id == RCS && (INTEL_GEN(dev_priv) >= 9))
                fw |= FORCEWAKE_RENDER;
 
        intel_uncore_forcewake_get(dev_priv, fw);
@@ -389,7 +388,8 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
        if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
                return;
 
-       if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)) && ring_id == RCS)
+       if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)
+               || IS_COFFEELAKE(dev_priv)) && ring_id == RCS)
                return;
 
        if (!pre && !gen9_render_mocs.initialized)
@@ -455,9 +455,7 @@ static void switch_mmio(struct intel_vgpu *pre,
        u32 old_v, new_v;
 
        dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
-       if (IS_SKYLAKE(dev_priv)
-               || IS_KABYLAKE(dev_priv)
-               || IS_BROXTON(dev_priv))
+       if (INTEL_GEN(dev_priv) >= 9)
                switch_mocs(pre, next, ring_id);
 
        for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
@@ -469,8 +467,8 @@ static void switch_mmio(struct intel_vgpu *pre,
                 * state image on kabylake, it's initialized by lri command and
                 * save or restore with context together.
                 */
-               if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv))
-                       && mmio->in_context)
+               if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)
+                       || IS_COFFEELAKE(dev_priv)) && mmio->in_context)
                        continue;
 
                // save
@@ -563,9 +561,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
 {
        struct engine_mmio *mmio;
 
-       if (IS_SKYLAKE(gvt->dev_priv) ||
-               IS_KABYLAKE(gvt->dev_priv) ||
-               IS_BROXTON(gvt->dev_priv))
+       if (INTEL_GEN(gvt->dev_priv) >= 9)
                gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
        else
                gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
index 1ad8c5e1455d782160d15c4a1c83cb6f64dad3cf..fb7445baa139a5bd43240bafb2357b8d780f13f5 100644 (file)
@@ -299,7 +299,8 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
        void *shadow_ring_buffer_va;
        u32 *cs;
 
-       if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915))
+       if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915)
+               || IS_COFFEELAKE(req->i915))
                && is_inhibit_context(req->hw_context))
                intel_vgpu_restore_inhibit_context(vgpu, req);
 
@@ -939,9 +940,7 @@ static int workload_thread(void *priv)
        struct intel_vgpu_workload *workload = NULL;
        struct intel_vgpu *vgpu = NULL;
        int ret;
-       bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
-                       || IS_KABYLAKE(gvt->dev_priv)
-                       || IS_BROXTON(gvt->dev_priv);
+       bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
        kfree(p);