drm/i915: Protect WC stash allocation against direct reclaim
author Chris Wilson <chris@chris-wilson.co.uk>
Sun, 21 Jan 2018 17:31:43 +0000 (17:31 +0000)
committer Chris Wilson <chris@chris-wilson.co.uk>
Mon, 22 Jan 2018 12:22:04 +0000 (12:22 +0000)
As we attempt to allocate pages for use in a new WC stash, direct
reclaim may run underneath us and fill up the WC stash. We have to be
careful then not to overflow the pvec.

Fixes: 66df1014efba ("drm/i915: Keep a small stash of preallocated WC pages")
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=103109
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180121173143.17090-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_gem_gtt.c

index 26dee5e61e997e86bec39bbc2bfe32cebc647168..be227512430aba9dbb57f05928b61da62206ef44 100644 (file)
@@ -377,6 +377,7 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
 static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 {
        struct pagevec *pvec = &vm->free_pages;
+       struct pagevec stash;
 
        if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
                i915_gem_shrink_all(vm->i915);
@@ -395,7 +396,15 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
        if (likely(pvec->nr))
                return pvec->pages[--pvec->nr];
 
-       /* Otherwise batch allocate pages to amoritize cost of set_pages_wc. */
+       /*
+        * Otherwise batch allocate pages to amoritize cost of set_pages_wc.
+        *
+        * We have to be careful as page allocation may trigger the shrinker
+        * (via direct reclaim) which will fill up the WC stash underneath us.
+        * So we add our WB pages into a temporary pvec on the stack and merge
+        * them into the WC stash after all the allocations are complete.
+        */
+       pagevec_init(&stash);
        do {
                struct page *page;
 
@@ -403,15 +412,24 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
                if (unlikely(!page))
                        break;
 
-               pvec->pages[pvec->nr++] = page;
-       } while (pagevec_space(pvec));
+               stash.pages[stash.nr++] = page;
+       } while (stash.nr < pagevec_space(pvec));
 
-       if (unlikely(!pvec->nr))
-               return NULL;
+       if (stash.nr) {
+               int nr = min_t(int, stash.nr, pagevec_space(pvec));
+               struct page **pages = stash.pages + stash.nr - nr;
 
-       set_pages_array_wc(pvec->pages, pvec->nr);
+               if (nr && !set_pages_array_wc(pages, nr)) {
+                       memcpy(pvec->pages + pvec->nr,
+                              pages, sizeof(pages[0]) * nr);
+                       pvec->nr += nr;
+                       stash.nr -= nr;
+               }
+
+               pagevec_release(&stash);
+       }
 
-       return pvec->pages[--pvec->nr];
+       return likely(pvec->nr) ? pvec->pages[--pvec->nr] : NULL;
 }
 
 static void vm_free_pages_release(struct i915_address_space *vm,