+#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)
+
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);
return pt;
}
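The new as_pd() recovers a directory from the page table embedded as its first member, which is what lets one set of helpers serve both types. A minimal sketch of the layout these macros assume; the real i915 definitions are more elaborate (px_base()/px_used() are made polymorphic over pt/pd pointers via __px_choose_expr), so treat this as illustration only:

struct i915_page_table {
        struct i915_page_dma base;
        atomic_t used;
};

struct i915_page_directory {
        struct i915_page_table pt;      /* first member: as_pd() container_of()s it */
        spinlock_t lock;
        void *entry[512];
};

/* Simplified resolution assumed throughout this patch:
 * px_base(pd) -> &pd->pt.base, px_used(pd) -> &pd->pt.used;
 * px_base(pt) -> &pt->base,    px_used(pt) -> &pt->used.
 */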
-static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
-{
- cleanup_page_dma(vm, &pt->base);
- kfree(pt);
-}
-
static struct i915_page_directory *__alloc_pd(void)
{
struct i915_page_directory *pd;
pd = kmalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
-
if (unlikely(!pd))
return NULL;
- memset(&pd->base, 0, sizeof(pd->base));
- atomic_set(&pd->used, 0);
+ atomic_set(px_used(pd), 0);
spin_lock_init(&pd->lock);
- /* for safety */
- pd->entry[0] = NULL;
-
return pd;
}
if (unlikely(!pd))
return ERR_PTR(-ENOMEM);
- if (unlikely(setup_page_dma(vm, &pd->base))) {
+ if (unlikely(setup_page_dma(vm, px_base(pd)))) {
kfree(pd);
return ERR_PTR(-ENOMEM);
}
return pd;
}
-static void free_pd(struct i915_address_space *vm,
- struct i915_page_directory *pd)
+static void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd)
{
- cleanup_page_dma(vm, &pd->base);
+ cleanup_page_dma(vm, pd);
kfree(pd);
}
+#define free_px(vm, px) free_pd(vm, px_base(px))
+
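With the table embedded first, a single free_pd() taking the raw i915_page_dma can tear down either type, and free_px() is the type-agnostic front end. Hypothetical usage, not part of the patch:

free_px(vm, pt);        /* expands to free_pd(vm, &pt->base) */
free_px(vm, pd);        /* expands to free_pd(vm, &pd->pt.base) */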
static void init_pd(struct i915_page_directory *pd,
struct i915_page_dma *scratch)
{
struct i915_page_dma * const to,
u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
{
- GEM_BUG_ON(atomic_read(&pd->used) > 512);
+ GEM_BUG_ON(atomic_read(px_used(pd)) > 512);
- atomic_inc(&pd->used);
+ atomic_inc(px_used(pd));
pd->entry[pde] = to;
write_dma_entry(px_base(pd), pde, encode(to->daddr, I915_CACHE_LLC));
}
struct i915_page_dma * const to,
u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
{
- GEM_BUG_ON(atomic_read(&pd->used) == 0);
+ GEM_BUG_ON(atomic_read(px_used(pd)) == 0);
write_dma_entry(px_base(pd), pde, encode(to->daddr, I915_CACHE_LLC));
pd->entry[pde] = to;
- atomic_dec(&pd->used);
+ atomic_dec(px_used(pd));
}
#define set_pd_entry(pd, pde, to) \
static bool
release_pd_entry(struct i915_page_directory * const pd,
const unsigned short pde,
- atomic_t *counter,
+ struct i915_page_table * const pt,
struct i915_page_dma * const scratch)
{
bool free = false;
spin_lock(&pd->lock);
- if (atomic_dec_and_test(counter)) {
+ if (atomic_dec_and_test(&pt->used)) {
clear_pd_entry(pd, pde, scratch);
free = true;
}
atomic_inc(&pt->used);
gen8_ppgtt_clear_pt(vm, pt, start, length);
- if (release_pd_entry(pd, pde, &pt->used, &vm->scratch_pt))
- free_pt(vm, pt);
+ if (release_pd_entry(pd, pde, pt, &vm->scratch_pt))
+ free_px(vm, pt);
}
}
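The release_pd_entry() hunk above is truncated. Completing it from the locals shown, the function presumably ends by dropping pd->lock and returning whether the entry was released; the clear path bumps pt->used before descending so that this dec-and-test balances:

static bool
release_pd_entry(struct i915_page_directory * const pd,
                 const unsigned short pde,
                 struct i915_page_table * const pt,
                 struct i915_page_dma * const scratch)
{
        bool free = false;

        spin_lock(&pd->lock);
        if (atomic_dec_and_test(&pt->used)) {
                /* Last reference: point the entry back at scratch. */
                clear_pd_entry(pd, pde, scratch);
                free = true;
        }
        spin_unlock(&pd->lock);

        return free;    /* caller then free_px()es the idle object */
}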
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
GEM_BUG_ON(px_base(pd) == &vm->scratch_pd);
- atomic_inc(&pd->used);
+ atomic_inc(px_used(pd));
gen8_ppgtt_clear_pd(vm, pd, start, length);
- if (release_pd_entry(pdp, pdpe, &pd->used, &vm->scratch_pd))
- free_pd(vm, pd);
+ if (release_pd_entry(pdp, pdpe, &pd->pt, &vm->scratch_pd))
+ free_px(vm, pd);
}
}
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
GEM_BUG_ON(px_base(pdp) == &vm->scratch_pdp);
- atomic_inc(&pdp->used);
+ atomic_inc(px_used(pdp));
gen8_ppgtt_clear_pdp(vm, pdp, start, length);
- if (release_pd_entry(pml4, pml4e, &pdp->used, &vm->scratch_pdp))
- free_pd(vm, pdp);
+ if (release_pd_entry(pml4, pml4e, &pdp->pt, &vm->scratch_pdp))
+ free_px(vm, pdp);
}
}
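Note that directories now hand release_pd_entry() their embedded table (&pd->pt, &pdp->pt) rather than a bare atomic_t, so one helper serves every level of the walk. An illustrative round-trip (check_embedding() is hypothetical, not from the patch):

static void check_embedding(struct i915_page_directory *pd)
{
        struct i915_page_table *pt = &pd->pt;  /* what the callers pass */

        GEM_BUG_ON(as_pd(pt) != pd);    /* container_of() undoes the &pd->pt */
}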
for (i = 0; i < I915_PDES; i++) {
if (pd->entry[i] != &vm->scratch_pt)
- free_pt(vm, pd->entry[i]);
+ free_pd(vm, pd->entry[i]);
}
}
int i;
if (create)
- atomic_inc(&ppgtt->pd->used); /* never remove */
+ atomic_inc(px_used(ppgtt->pd)); /* never remove */
else
- atomic_dec(&ppgtt->pd->used);
+ atomic_dec(px_used(ppgtt->pd));
if (i915_vm_is_4lvl(vm)) {
const u64 daddr = px_dma(ppgtt->pd);
free_pd(vm, pdp->entry[i]);
}
- free_pd(vm, pdp);
+ free_px(vm, pdp);
}
static void gen8_ppgtt_cleanup_4lvl(struct i915_ppgtt *ppgtt)
gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, pdp);
}
- free_pd(&ppgtt->vm, pml4);
+ free_px(&ppgtt->vm, pml4);
}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
gen8_ppgtt_clear_pd(vm, pd, from, start - from);
out:
if (alloc)
- free_pt(vm, alloc);
+ free_px(vm, alloc);
return ret;
}
pd = pdp->entry[pdpe];
}
}
- atomic_inc(&pd->used);
+ atomic_inc(px_used(pd));
spin_unlock(&pdp->lock);
ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
goto unwind_pd;
spin_lock(&pdp->lock);
- atomic_dec(&pd->used);
+ atomic_dec(px_used(pd));
}
spin_unlock(&pdp->lock);
goto out;
unwind_pd:
- if (release_pd_entry(pdp, pdpe, &pd->used, &vm->scratch_pd))
- free_pd(vm, pd);
+ if (release_pd_entry(pdp, pdpe, &pd->pt, &vm->scratch_pd))
+ free_px(vm, pd);
unwind:
gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
out:
if (alloc)
- free_pd(vm, alloc);
+ free_px(vm, alloc);
return ret;
}
pdp = pml4->entry[pml4e];
}
}
- atomic_inc(&pdp->used);
+ atomic_inc(px_used(pdp));
spin_unlock(&pml4->lock);
ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
goto unwind_pdp;
spin_lock(&pml4->lock);
- atomic_dec(&pdp->used);
+ atomic_dec(px_used(pdp));
}
spin_unlock(&pml4->lock);
goto out;
unwind_pdp:
- if (release_pd_entry(pml4, pml4e, &pdp->used, &vm->scratch_pdp))
- free_pd(vm, pdp);
+ if (release_pd_entry(pml4, pml4e, &pdp->pt, &vm->scratch_pdp))
+ free_px(vm, pdp);
unwind:
gen8_ppgtt_clear_4lvl(vm, from, start - from);
out:
if (alloc)
- free_pd(vm, alloc);
+ free_px(vm, alloc);
return ret;
}
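Pieced together from the fragments above, each gen8_ppgtt_alloc_* level presumably follows the same optimistic pattern: drop the lock to allocate, retake it, and stash the object as a spare ('alloc') if another thread won the race. A condensed sketch of the pdp level under those assumptions (declarations and error paths elided; fetch_and_zero() is the existing i915 return-and-clear helper):

spin_lock(&pdp->lock);
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
        if (px_base(pd) == &vm->scratch_pd) {
                spin_unlock(&pdp->lock);

                pd = fetch_and_zero(&alloc);    /* reuse a spare from a lost race */
                if (!pd)
                        pd = alloc_pd(vm);      /* may sleep, hence unlocked */
                if (IS_ERR(pd)) {
                        ret = PTR_ERR(pd);
                        goto unwind;
                }

                spin_lock(&pdp->lock);
                if (px_base(pdp->entry[pdpe]) == &vm->scratch_pd) {
                        set_pd_entry(pdp, pdpe, pd);
                } else {
                        alloc = pd;             /* lost the race: keep as spare */
                        pd = pdp->entry[pdpe];
                }
        }

        atomic_inc(px_used(pd));        /* pin pd across the child allocation */
        spin_unlock(&pdp->lock);

        ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
        /* ... on failure, release_pd_entry() + free_px() as in the unwind above */
}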
unwind:
gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
- atomic_set(&pdp->used, 0);
+ atomic_set(px_used(pdp), 0);
return -ENOMEM;
}
const unsigned int entries)
{
const u64 daddr = gen8_pde_encode(to->daddr, I915_CACHE_LLC);
- u64 * const vaddr = kmap_atomic(pd->base.page);
+ u64 * const vaddr = kmap_atomic_px(pd);
memset64(vaddr, daddr, entries);
kunmap_atomic(vaddr);
pd->entry[GEN8_3LVL_PDPES] = NULL;
- if (unlikely(setup_page_dma(vm, &pd->base))) {
+ if (unlikely(setup_page_dma(vm, px_base(pd)))) {
kfree(pd);
return ERR_PTR(-ENOMEM);
}
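kmap_atomic_px() replaces the open-coded kmap_atomic(pd->base.page); presumably it is just the kmap routed through px_base(), along the lines of:

/* Assumed definition, mirroring the px_base() indirection. */
#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)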
return ppgtt;
err_free_pd:
- free_pd(&ppgtt->vm, ppgtt->pd);
+ free_px(&ppgtt->vm, ppgtt->pd);
err_free_scratch:
free_scratch(&ppgtt->vm);
err_free:
gen6_ppgtt_clear_range(vm, from, start - from);
out:
if (alloc)
- free_pt(vm, alloc);
+ free_px(vm, alloc);
intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
return ret;
}
gen6_for_all_pdes(pt, pd, pde)
if (px_base(pt) != &ppgtt->base.vm.scratch_pt)
- free_pt(&ppgtt->base.vm, pt);
+ free_px(&ppgtt->base.vm, pt);
}
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
struct i915_page_table *pt;
unsigned int pde;
- ppgtt->base.pd->base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
+ px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
gen6_for_all_pdes(pt, ppgtt->base.pd, pde)
if (px_base(pt) == scratch || atomic_read(&pt->used))
continue;
- free_pt(&ppgtt->base.vm, pt);
+ free_px(&ppgtt->base.vm, pt);
pd->entry[pde] = scratch;
}