drm/i915: Move i915_vma_move_to_active() to i915_vma.c
author Chris Wilson <chris@chris-wilson.co.uk>
Fri, 6 Jul 2018 10:39:45 +0000 (11:39 +0100)
committer Chris Wilson <chris@chris-wilson.co.uk>
Fri, 6 Jul 2018 17:22:41 +0000 (18:22 +0100)
i915_vma_move_to_active() has grown beyond its execbuf origins, and
should take its rightful place in i915_vma.c as a method for i915_vma!

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180706103947.15919-4-chris@chris-wilson.co.uk
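
For context: after this move the prototype lives in i915_vma.h (see below) and remains __must_check, so every caller has to consume the return value. A minimal, hypothetical call-site sketch, with vma, rq and flags standing in for state the real call sites already hold:

	err = i915_vma_move_to_active(vma, rq, flags);
	if (err)
		return err;

As of this patch the function can only return 0; the __must_check annotation keeps callers honest so that later patches can introduce real failure paths without auditing every user.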
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/i915_vma.h

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 07846e63671d0c33d59533cddae9e4bd96a2126a..c790081777083c402948c5346b02f92b6f10c1ec 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3090,9 +3090,6 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
 }
 
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-int __must_check i915_vma_move_to_active(struct i915_vma *vma,
-                                        struct i915_request *rq,
-                                        unsigned int flags);
 int i915_gem_dumb_create(struct drm_file *file_priv,
                         struct drm_device *dev,
                         struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 97136e4ce91db1f7abd6d49eba9619c964e095f4..3f0c612d42e786d44cff5c86b59bb1da27c0fea9 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1868,67 +1868,6 @@ static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
        return true;
 }
 
-static void export_fence(struct i915_vma *vma,
-                        struct i915_request *rq,
-                        unsigned int flags)
-{
-       struct reservation_object *resv = vma->resv;
-
-       /*
-        * Ignore errors from failing to allocate the new fence, we can't
-        * handle an error right now. Worst case should be missed
-        * synchronisation leading to rendering corruption.
-        */
-       reservation_object_lock(resv, NULL);
-       if (flags & EXEC_OBJECT_WRITE)
-               reservation_object_add_excl_fence(resv, &rq->fence);
-       else if (reservation_object_reserve_shared(resv) == 0)
-               reservation_object_add_shared_fence(resv, &rq->fence);
-       reservation_object_unlock(resv);
-}
-
-int i915_vma_move_to_active(struct i915_vma *vma,
-                           struct i915_request *rq,
-                           unsigned int flags)
-{
-       struct drm_i915_gem_object *obj = vma->obj;
-       const unsigned int idx = rq->engine->id;
-
-       lockdep_assert_held(&rq->i915->drm.struct_mutex);
-       GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-
-       /*
-        * Add a reference if we're newly entering the active list.
-        * The order in which we add operations to the retirement queue is
-        * vital here: mark_active adds to the start of the callback list,
-        * such that subsequent callbacks are called first. Therefore we
-        * add the active reference first and queue for it to be dropped
-        * *last*.
-        */
-       if (!i915_vma_is_active(vma))
-               obj->active_count++;
-       i915_vma_set_active(vma, idx);
-       i915_gem_active_set(&vma->last_read[idx], rq);
-       list_move_tail(&vma->vm_link, &vma->vm->active_list);
-
-       obj->write_domain = 0;
-       if (flags & EXEC_OBJECT_WRITE) {
-               obj->write_domain = I915_GEM_DOMAIN_RENDER;
-
-               if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
-                       i915_gem_active_set(&obj->frontbuffer_write, rq);
-
-               obj->read_domains = 0;
-       }
-       obj->read_domains |= I915_GEM_GPU_DOMAINS;
-
-       if (flags & EXEC_OBJECT_NEEDS_FENCE)
-               i915_gem_active_set(&vma->last_fence, rq);
-
-       export_fence(vma, rq, flags);
-       return 0;
-}
-
 static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
 {
        u32 *cs;
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 518de47111ff262d30a9b4a6e872e2665dfc4365..6f3a0f2296c22cfe3ccb8c942f5c34c179d21c32 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -859,6 +859,67 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
                list_del(&vma->obj->userfault_link);
 }
 
+static void export_fence(struct i915_vma *vma,
+                        struct i915_request *rq,
+                        unsigned int flags)
+{
+       struct reservation_object *resv = vma->resv;
+
+       /*
+        * Ignore errors from failing to allocate the new fence, we can't
+        * handle an error right now. Worst case should be missed
+        * synchronisation leading to rendering corruption.
+        */
+       reservation_object_lock(resv, NULL);
+       if (flags & EXEC_OBJECT_WRITE)
+               reservation_object_add_excl_fence(resv, &rq->fence);
+       else if (reservation_object_reserve_shared(resv) == 0)
+               reservation_object_add_shared_fence(resv, &rq->fence);
+       reservation_object_unlock(resv);
+}
+
+int i915_vma_move_to_active(struct i915_vma *vma,
+                           struct i915_request *rq,
+                           unsigned int flags)
+{
+       struct drm_i915_gem_object *obj = vma->obj;
+       const unsigned int idx = rq->engine->id;
+
+       lockdep_assert_held(&rq->i915->drm.struct_mutex);
+       GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+
+       /*
+        * Add a reference if we're newly entering the active list.
+        * The order in which we add operations to the retirement queue is
+        * vital here: mark_active adds to the start of the callback list,
+        * such that subsequent callbacks are called first. Therefore we
+        * add the active reference first and queue for it to be dropped
+        * *last*.
+        */
+       if (!i915_vma_is_active(vma))
+               obj->active_count++;
+       i915_vma_set_active(vma, idx);
+       i915_gem_active_set(&vma->last_read[idx], rq);
+       list_move_tail(&vma->vm_link, &vma->vm->active_list);
+
+       obj->write_domain = 0;
+       if (flags & EXEC_OBJECT_WRITE) {
+               obj->write_domain = I915_GEM_DOMAIN_RENDER;
+
+               if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
+                       i915_gem_active_set(&obj->frontbuffer_write, rq);
+
+               obj->read_domains = 0;
+       }
+       obj->read_domains |= I915_GEM_GPU_DOMAINS;
+
+       if (flags & EXEC_OBJECT_NEEDS_FENCE)
+               i915_gem_active_set(&vma->last_fence, rq);
+
+       export_fence(vma, rq, flags);
+       return 0;
+}
+
 int i915_vma_unbind(struct i915_vma *vma)
 {
        unsigned long active;
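
Aside (illustrative, not from this patch): the fences attached by export_fence() above are what other readers of the object's reservation object end up waiting on. A sketch of such a waiter using the reservation_object API of this kernel, assuming resv is the same vma->resv:

	long ret;

	/* wait_all waits on the shared fences as well as the exclusive one */
	ret = reservation_object_wait_timeout_rcu(resv, true, true,
						  MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;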
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 66a228931517fb84eeeebef6a086bf215c71297d..a218b689e418a7480fcf551aeefee59961467a82 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -215,6 +215,10 @@ static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
        return vma->active & BIT(engine);
 }
 
+int __must_check i915_vma_move_to_active(struct i915_vma *vma,
+                                        struct i915_request *rq,
+                                        unsigned int flags);
+
 static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
 {
        GEM_BUG_ON(!i915_vma_is_ggtt(vma));