drm/i915: push set_pages down to the callers
author    Matthew Auld <matthew.auld@intel.com>      Fri, 6 Oct 2017 22:18:17 +0000 (23:18 +0100)
committer Chris Wilson <chris@chris-wilson.co.uk>    Sat, 7 Oct 2017 09:11:45 +0000 (10:11 +0100)
Each backend is now responsible for calling __i915_gem_object_set_pages
upon successfully gathering its backing storage. This eliminates the
inconsistency between the async and sync paths, which stands out even
more when we start throwing around an sg_mask in a later patch.

Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171006145041.21673-6-matthew.auld@intel.com
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171006221833.32439-5-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/i915_gem_internal.c
drivers/gpu/drm/i915/i915_gem_object.h
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/selftests/huge_gem_object.c
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
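
As the commit message says, the contract for the get_pages hook changes
shape: instead of returning the sg_table (or an ERR_PTR) for the common
caller to install, each backend now returns 0 or a negative errno and
installs the pages itself via __i915_gem_object_set_pages(). A minimal
sketch of the two shapes; gather_backing_storage() is a hypothetical
stand-in for whatever work a given backend does to build its sg_table:

/* Before: the backend hands the table (or an ERR_PTR) back to the caller. */
static struct sg_table *
example_get_pages_old(struct drm_i915_gem_object *obj)
{
        struct sg_table *st;

        st = gather_backing_storage(obj); /* hypothetical helper */
        if (!st)
                return ERR_PTR(-ENOMEM);

        return st; /* caller then calls __i915_gem_object_set_pages(obj, st) */
}

/* After: the backend installs the table itself and reports only 0/-errno. */
static int example_get_pages(struct drm_i915_gem_object *obj)
{
        struct sg_table *st;

        st = gather_backing_storage(obj); /* hypothetical helper */
        if (!st)
                return -ENOMEM;

        __i915_gem_object_set_pages(obj, st);
        return 0;
}

This is exactly the transformation applied, file by file, in the diffs
below.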

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1da1f52d12cc55c62d733df44b8106ac687f7118..42f2ca1e136b4ceb72fe885b6952df931ebc9f4a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -162,8 +162,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
-static struct sg_table *
-i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 {
        struct address_space *mapping = obj->base.filp->f_mapping;
        drm_dma_handle_t *phys;
@@ -171,9 +170,10 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
        struct scatterlist *sg;
        char *vaddr;
        int i;
+       int err;
 
        if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
        /* Always aligning to the object size, allows a single allocation
         * to handle all possible callers, and given typical object sizes,
@@ -183,7 +183,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
                             roundup_pow_of_two(obj->base.size),
                             roundup_pow_of_two(obj->base.size));
        if (!phys)
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
 
        vaddr = phys->vaddr;
        for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
@@ -192,7 +192,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 
                page = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(page)) {
-                       st = ERR_CAST(page);
+                       err = PTR_ERR(page);
                        goto err_phys;
                }
 
@@ -209,13 +209,13 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st) {
-               st = ERR_PTR(-ENOMEM);
+               err = -ENOMEM;
                goto err_phys;
        }
 
        if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
-               st = ERR_PTR(-ENOMEM);
+               err = -ENOMEM;
                goto err_phys;
        }
 
@@ -227,11 +227,15 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
        sg_dma_len(sg) = obj->base.size;
 
        obj->phys_handle = phys;
-       return st;
+
+       __i915_gem_object_set_pages(obj, st);
+
+       return 0;
 
 err_phys:
        drm_pci_free(obj->base.dev, phys);
-       return st;
+
+       return err;
 }
 
 static void __start_cpu_write(struct drm_i915_gem_object *obj)
@@ -2292,8 +2296,7 @@ static bool i915_sg_trim(struct sg_table *orig_st)
        return true;
 }
 
-static struct sg_table *
-i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        const unsigned long page_count = obj->base.size / PAGE_SIZE;
@@ -2317,12 +2320,12 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
 
 rebuild_st:
        if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
                kfree(st);
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
        }
 
        /* Get the list of pages out of our struct file.  They'll be pinned
@@ -2430,7 +2433,9 @@ rebuild_st:
        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_do_bit_17_swizzle(obj, st);
 
-       return st;
+       __i915_gem_object_set_pages(obj, st);
+
+       return 0;
 
 err_sg:
        sg_mark_end(sg);
@@ -2451,7 +2456,7 @@ err_pages:
        if (ret == -ENOSPC)
                ret = -ENOMEM;
 
-       return ERR_PTR(ret);
+       return ret;
 }
 
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
@@ -2474,19 +2479,17 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 
 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
-       struct sg_table *pages;
+       int err;
 
        if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
                DRM_DEBUG("Attempting to obtain a purgeable object\n");
                return -EFAULT;
        }
 
-       pages = obj->ops->get_pages(obj);
-       if (unlikely(IS_ERR(pages)))
-               return PTR_ERR(pages);
+       err = obj->ops->get_pages(obj);
+       GEM_BUG_ON(!err && IS_ERR_OR_NULL(obj->mm.pages));
 
-       __i915_gem_object_set_pages(obj, pages);
-       return 0;
+       return err;
 }
 
 /* Ensure that the associated pages are gathered from the backing storage
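
Most of the churn above is this mechanical ERR_PTR-to-errno conversion,
and the reworked ____i915_gem_object_get_pages() merely asserts the new
contract: a backend returning 0 must have installed a valid
obj->mm.pages. For reference, the include/linux/err.h helpers the
conversions lean on behave as sketched below; PTR_ERR_OR_ZERO() is the
one the userptr conversion further down relies on.

/* ERR_PTR(-E) encodes an errno inside a pointer value, IS_ERR() detects
 * such a pointer, and PTR_ERR() decodes the errno back out.
 * PTR_ERR_OR_ZERO(ptr) is equivalent to this sketch:
 */
static inline int example_ptr_err_or_zero(const void *ptr)
{
        if (IS_ERR(ptr))
                return PTR_ERR(ptr);

        return 0;
}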
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 6176e589cf09f9b287cf25700a27a170255d6fe8..4c4dc85159fb4b31e7b62743ceb2a19903ae82f9 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -256,11 +256,18 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
        return drm_gem_dmabuf_export(dev, &exp_info);
 }
 
-static struct sg_table *
-i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
 {
-       return dma_buf_map_attachment(obj->base.import_attach,
-                                     DMA_BIDIRECTIONAL);
+       struct sg_table *pages;
+
+       pages = dma_buf_map_attachment(obj->base.import_attach,
+                                      DMA_BIDIRECTIONAL);
+       if (IS_ERR(pages))
+               return PTR_ERR(pages);
+
+       __i915_gem_object_set_pages(obj, pages);
+
+       return 0;
 }
 
 static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index c1f64ddaf8aa627010a73d4a08bc55d46e32d745..f59764da42544a413370a0be93c0d0fdda5df673 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -44,8 +44,7 @@ static void internal_free_pages(struct sg_table *st)
        kfree(st);
 }
 
-static struct sg_table *
-i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct sg_table *st;
@@ -78,12 +77,12 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 create_st:
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st)
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
 
        npages = obj->base.size / PAGE_SIZE;
        if (sg_alloc_table(st, npages, GFP_KERNEL)) {
                kfree(st);
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
        }
 
        sg = st->sgl;
@@ -132,13 +131,17 @@ create_st:
         * object are only valid whilst active and pinned.
         */
        obj->mm.madv = I915_MADV_DONTNEED;
-       return st;
+
+       __i915_gem_object_set_pages(obj, st);
+
+       return 0;
 
 err:
        sg_set_page(sg, NULL, 0, 0);
        sg_mark_end(sg);
        internal_free_pages(st);
-       return ERR_PTR(-ENOMEM);
+
+       return -ENOMEM;
 }
 
 static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index c30d8f80818587bfe4500a08b377b2ac45836f68..036e847b27f0a2c10b57d479bb9e55f329266e64 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -69,7 +69,7 @@ struct drm_i915_gem_object_ops {
         * being released or under memory pressure (where we attempt to
         * reap pages for the shrinker).
         */
-       struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
+       int (*get_pages)(struct drm_i915_gem_object *);
        void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
 
        int (*pwrite)(struct drm_i915_gem_object *,
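
get_pages and put_pages stay a symmetric pair after this change: the
table a backend installs through __i915_gem_object_set_pages() is the
same one handed back to its put_pages hook on release. A hypothetical
wiring, reusing the example_get_pages() sketch from the top of this
patch:

static void example_put_pages(struct drm_i915_gem_object *obj,
                              struct sg_table *pages)
{
        /* Receives the table that example_get_pages() installed. */
        sg_free_table(pages);
        kfree(pages);
}

static const struct drm_i915_gem_object_ops example_ops = {
        .get_pages = example_get_pages, /* now int, installs pages itself */
        .put_pages = example_put_pages,
};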
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 507c9f0d8df1267a5a05c285800a8ac3baef77b7..537ecb224db0ee4c874205d6f728d356da7a8784 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -539,12 +539,18 @@ i915_pages_create_for_stolen(struct drm_device *dev,
        return st;
 }
 
-static struct sg_table *
-i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
 {
-       return i915_pages_create_for_stolen(obj->base.dev,
-                                           obj->stolen->start,
-                                           obj->stolen->size);
+       struct sg_table *pages =
+               i915_pages_create_for_stolen(obj->base.dev,
+                                            obj->stolen->start,
+                                            obj->stolen->size);
+       if (IS_ERR(pages))
+               return PTR_ERR(pages);
+
+       __i915_gem_object_set_pages(obj, pages);
+
+       return 0;
 }
 
 static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 2d4996de733120b155a613f5f60137b3afcecbcb..70ad7489827dfdf23a0b8c212bd4ef6c53eb69a6 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -434,6 +434,8 @@ alloc_table:
                return ERR_PTR(ret);
        }
 
+       __i915_gem_object_set_pages(obj, st);
+
        return st;
 }
 
@@ -521,7 +523,6 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
                        pages = __i915_gem_userptr_alloc_pages(obj, pvec,
                                                               npages);
                        if (!IS_ERR(pages)) {
-                               __i915_gem_object_set_pages(obj, pages);
                                pinned = 0;
                                pages = NULL;
                        }
@@ -582,8 +583,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
        return ERR_PTR(-EAGAIN);
 }
 
-static struct sg_table *
-i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
+static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 {
        const int num_pages = obj->base.size >> PAGE_SHIFT;
        struct mm_struct *mm = obj->userptr.mm->mm;
@@ -612,9 +612,9 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
        if (obj->userptr.work) {
                /* active flag should still be held for the pending work */
                if (IS_ERR(obj->userptr.work))
-                       return ERR_CAST(obj->userptr.work);
+                       return PTR_ERR(obj->userptr.work);
                else
-                       return ERR_PTR(-EAGAIN);
+                       return -EAGAIN;
        }
 
        pvec = NULL;
@@ -650,7 +650,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
                release_pages(pvec, pinned, 0);
        kvfree(pvec);
 
-       return pages;
+       return PTR_ERR_OR_ZERO(pages);
 }
 
 static void
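
userptr is the async outlier this patch brings into line:
__i915_gem_userptr_alloc_pages() now calls
__i915_gem_object_set_pages() itself (the alloc_table hunk above), so
the worker and the synchronous path install pages the same way, and the
tail of i915_gem_userptr_get_pages() collapses into a single
conversion. A condensed reading of that tail, not verbatim kernel code:

/* By this point `pages` is either a valid sg_table -- already installed
 * by __i915_gem_userptr_alloc_pages() on the successful path -- or an
 * ERR_PTR carrying the failure (e.g. -EAGAIN while the worker is still
 * pending), so one helper maps it onto the new 0/-errno contract:
 */
return PTR_ERR_OR_ZERO(pages);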
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
index c5c7e8efbdd3420a0da450044993e04d9a4f834b..41c15f3aa467a4f25822cfc13c98a0239d123251 100644
--- a/drivers/gpu/drm/i915/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
@@ -37,8 +37,7 @@ static void huge_free_pages(struct drm_i915_gem_object *obj,
        kfree(pages);
 }
 
-static struct sg_table *
-huge_get_pages(struct drm_i915_gem_object *obj)
+static int huge_get_pages(struct drm_i915_gem_object *obj)
 {
 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
        const unsigned long nreal = obj->scratch / PAGE_SIZE;
@@ -49,11 +48,11 @@ huge_get_pages(struct drm_i915_gem_object *obj)
 
        pages = kmalloc(sizeof(*pages), GFP);
        if (!pages)
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
 
        if (sg_alloc_table(pages, npages, GFP)) {
                kfree(pages);
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
        }
 
        sg = pages->sgl;
@@ -81,11 +80,14 @@ huge_get_pages(struct drm_i915_gem_object *obj)
        if (i915_gem_gtt_prepare_pages(obj, pages))
                goto err;
 
-       return pages;
+       __i915_gem_object_set_pages(obj, pages);
+
+       return 0;
 
 err:
        huge_free_pages(obj, pages);
-       return ERR_PTR(-ENOMEM);
+
+       return -ENOMEM;
 #undef GFP
 }
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 6b132caffa184d8689a495613857bba1475022ef..aa1db375d59ab59940ca4d5bf568c4350eeba560 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -39,8 +39,7 @@ static void fake_free_pages(struct drm_i915_gem_object *obj,
        kfree(pages);
 }
 
-static struct sg_table *
-fake_get_pages(struct drm_i915_gem_object *obj)
+static int fake_get_pages(struct drm_i915_gem_object *obj)
 {
 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
 #define PFN_BIAS 0x1000
@@ -50,12 +49,12 @@ fake_get_pages(struct drm_i915_gem_object *obj)
 
        pages = kmalloc(sizeof(*pages), GFP);
        if (!pages)
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
 
        rem = round_up(obj->base.size, BIT(31)) >> 31;
        if (sg_alloc_table(pages, rem, GFP)) {
                kfree(pages);
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
        }
 
        rem = obj->base.size;
@@ -72,7 +71,10 @@ fake_get_pages(struct drm_i915_gem_object *obj)
        GEM_BUG_ON(rem);
 
        obj->mm.madv = I915_MADV_DONTNEED;
-       return pages;
+
+       __i915_gem_object_set_pages(obj, pages);
+
+       return 0;
 #undef GFP
 }
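
With the selftest backends above converted as well, every
implementation now funnels through the same assertion in
____i915_gem_object_get_pages(). A hypothetical caller-side view of the
contract, with locking and pinning details simplified:

static int example_use_object(struct drm_i915_gem_object *obj)
{
        int err;

        /* Populates obj->mm.pages via the backend's get_pages hook; on
         * success the GEM_BUG_ON in ____i915_gem_object_get_pages()
         * guarantees obj->mm.pages is a valid sg_table.
         */
        err = i915_gem_object_pin_pages(obj);
        if (err)
                return err;

        /* ... use obj->mm.pages ... */

        i915_gem_object_unpin_pages(obj);
        return 0;
}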