adreno_gpu->fw[ADRENO_FW_PFP] = NULL;
if (a5xx_gpu->pm4_bo) {
- if (a5xx_gpu->pm4_iova)
- msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
drm_gem_object_put(a5xx_gpu->pm4_bo);
a5xx_gpu->pm4_bo = NULL;
}
if (a5xx_gpu->pfp_bo) {
- if (a5xx_gpu->pfp_iova)
- msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
drm_gem_object_put(a5xx_gpu->pfp_bo);
a5xx_gpu->pfp_bo = NULL;
}
a5xx_preempt_fini(gpu);
if (a5xx_gpu->pm4_bo) {
- if (a5xx_gpu->pm4_iova)
- msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
drm_gem_object_put_unlocked(a5xx_gpu->pm4_bo);
}
if (a5xx_gpu->pfp_bo) {
- if (a5xx_gpu->pfp_iova)
- msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
drm_gem_object_put_unlocked(a5xx_gpu->pfp_bo);
}
if (a5xx_gpu->gpmu_bo) {
- if (a5xx_gpu->gpmu_iova)
- msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
drm_gem_object_put_unlocked(a5xx_gpu->gpmu_bo);
}
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
if (a6xx_gpu->sqe_bo) {
- if (a6xx_gpu->sqe_iova)
- msm_gem_put_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+ msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
drm_gem_object_put_unlocked(a6xx_gpu->sqe_bo);
}
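Across these teardown paths the shape of the change is identical: the old code guarded msm_gem_put_iova() behind an iova check, while msm_gem_unpin_iova() is simply called whenever the BO exists, because it only drops a pin reference (and WARNs internally if no vma exists for that address space). A minimal sketch of the resulting idiom, with a hypothetical helper name:

static void example_free_pinned_bo(struct msm_gpu *gpu,
		struct drm_gem_object *bo)
{
	if (!bo)
		return;

	/* Drop the pin reference taken by msm_gem_get_and_pin_iova() */
	msm_gem_unpin_iova(bo, gpu->aspace);

	/* Drop the GEM object reference */
	drm_gem_object_put(bo);
}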
static void a6xx_crashdumper_free(struct msm_gpu *gpu,
struct a6xx_crashdumper *dumper)
{
- msm_gem_put_iova(dumper->bo, gpu->aspace);
+ msm_gem_unpin_iova(dumper->bo, gpu->aspace);
msm_gem_put_vaddr(dumper->bo);
drm_gem_object_unreference(dumper->bo);
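For context, a hedged sketch of the allocation side that this free path unwinds in reverse; the size, flags, and error handling are illustrative, not the driver's actual init code:

static int example_crashdumper_init(struct msm_gpu *gpu,
		struct a6xx_crashdumper *dumper)
{
	int ret;

	dumper->bo = msm_gem_new(gpu->dev, SZ_1M, MSM_BO_UNCACHED);
	if (IS_ERR(dumper->bo))
		return PTR_ERR(dumper->bo);

	/* Map into the GPU address space and take the first pin reference */
	ret = msm_gem_get_and_pin_iova(dumper->bo, gpu->aspace, &dumper->iova);
	if (ret)
		goto err_put;

	dumper->ptr = msm_gem_get_vaddr(dumper->bo);
	if (IS_ERR(dumper->ptr)) {
		ret = PTR_ERR(dumper->ptr);
		msm_gem_unpin_iova(dumper->bo, gpu->aspace);
		goto err_put;
	}

	return 0;

err_put:
	drm_gem_object_put(dumper->bo);
	return ret;
}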
struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
struct msm_kms *kms = &mdp4_kms->base.base;
- msm_gem_put_iova(val, kms->aspace);
+ msm_gem_unpin_iova(val, kms->aspace);
drm_gem_object_put_unlocked(val);
}
struct msm_gem_address_space *aspace = kms->aspace;
if (mdp4_kms->blank_cursor_iova)
- msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
+ msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo);
if (aspace) {
struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
struct msm_kms *kms = &mdp5_kms->base.base;
- msm_gem_put_iova(val, kms->aspace);
+ msm_gem_unpin_iova(val, kms->aspace);
drm_gem_object_put_unlocked(val);
}
priv = dev->dev_private;
if (msm_host->tx_gem_obj) {
- msm_gem_put_iova(msm_host->tx_gem_obj, priv->kms->aspace);
+ msm_gem_unpin_iova(msm_host->tx_gem_obj, priv->kms->aspace);
drm_gem_object_put_unlocked(msm_host->tx_gem_obj);
msm_host->tx_gem_obj = NULL;
}
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, int npages);
+void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma);
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma);
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
+void msm_gem_close_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma);
void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
struct msm_gem_address_space *aspace, uint64_t *iova);
uint64_t msm_gem_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace);
+void msm_gem_unpin_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
-void msm_gem_put_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
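The header now separates address lookup from pinning: msm_gem_iova() and msm_gem_get_iova() only resolve an address, while the pin taken by msm_gem_get_and_pin_iova() must be balanced with the new msm_gem_unpin_iova(). A hedged usage sketch (the function is illustrative, not part of the driver):

static void example_pin_vs_lookup(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	uint64_t iova;

	/* Pin: maps the pages and keeps them resident for the hardware */
	if (msm_gem_get_and_pin_iova(obj, aspace, &iova) == 0) {
		/* ... hardware may use iova here ... */
		msm_gem_unpin_iova(obj, aspace);
	}

	/* Lookup only: no pin is taken, so there is nothing to balance */
	iova = msm_gem_iova(obj, aspace);
}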
int i, n = fb->format->num_planes;
for (i = 0; i < n; i++)
- msm_gem_put_iova(fb->obj[i], aspace);
+ msm_gem_unpin_iova(fb->obj[i], aspace);
}
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
WARN_ON(!mutex_is_locked(&msm_obj->lock));
list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
- msm_gem_unmap_vma(vma->aspace, vma);
+ msm_gem_purge_vma(vma->aspace, vma);
+ msm_gem_close_vma(vma->aspace, vma);
del_vma(vma);
}
}
return ret;
}
-/* Get an iova but don't pin the memory behind it */
+/*
+ * Get an iova but don't pin it. Doesn't need a put because iovas are currently
+ * valid for the life of the object
+ */
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova)
{
return ret;
}
-
/* get iova without taking a reference, used in places where you have
* already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
*/
return vma ? vma->iova : 0;
}
-void msm_gem_put_iova(struct drm_gem_object *obj,
+/*
+ * Unpin an iova by updating the reference counts. The memory isn't actually
+ * purged until something else (shrinker, mm_notifier, destroy, etc) decides
+ * to get rid of it
+ */
+void msm_gem_unpin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
{
- // XXX TODO ..
- // NOTE: probably don't need a _locked() version.. we wouldn't
- // normally unmap here, but instead just mark that it could be
- // unmapped (if the iova refcnt drops to zero), but then later
- // if another _get_iova_locked() fails we can start unmapping
- // things that are no longer needed..
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *vma;
+
+ mutex_lock(&msm_obj->lock);
+ vma = lookup_vma(obj, aspace);
+
+ if (!WARN_ON(!vma))
+ msm_gem_unmap_vma(aspace, vma);
+
+ mutex_unlock(&msm_obj->lock);
}
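Since unpinning only decrements the vma's reference count, pins nest; an illustrative trace of the intended counting (comments track vma->inuse, the wrapper function is hypothetical):

static void example_nested_pins(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	uint64_t iova;

	msm_gem_get_and_pin_iova(obj, aspace, &iova);	/* inuse: 0 -> 1, mapped */
	msm_gem_get_and_pin_iova(obj, aspace, &iova);	/* inuse: 1 -> 2 */
	msm_gem_unpin_iova(obj, aspace);	/* inuse: 2 -> 1, still mapped */
	msm_gem_unpin_iova(obj, aspace);	/* inuse: 1 -> 0, purgeable later */
}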
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
if (!list_empty(&msm_obj->vmas)) {
seq_puts(m, " vmas:");
list_for_each_entry(vma, &msm_obj->vmas, list)
- seq_printf(m, " [%s: %08llx,%s]", vma->aspace->name,
- vma->iova, vma->mapped ? "mapped" : "unmapped");
+ seq_printf(m, " [%s: %08llx,%s,inuse=%d]", vma->aspace->name,
+ vma->iova, vma->mapped ? "mapped" : "unmapped",
+ vma->inuse);
seq_puts(m, "\n");
}
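With the extra field, each vma entry in the debugfs listing also reports its pin count; an illustrative line (values made up) would read [gpu: 0000000001000000,mapped,inuse=1].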
vaddr = msm_gem_get_vaddr(obj);
if (IS_ERR(vaddr)) {
- msm_gem_put_iova(obj, aspace);
+ msm_gem_unpin_iova(obj, aspace);
ret = PTR_ERR(vaddr);
goto err;
}
return;
msm_gem_put_vaddr(bo);
- msm_gem_put_iova(bo, aspace);
+ msm_gem_unpin_iova(bo, aspace);
if (locked)
drm_gem_object_put(bo);
struct msm_gem_address_space *aspace;
struct list_head list; /* node in msm_gem_object::vmas */
bool mapped;
+ int inuse;
};
struct msm_gem_object {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
if (submit->bos[i].flags & BO_PINNED)
- msm_gem_put_iova(&msm_obj->base, submit->gpu->aspace);
+ msm_gem_unpin_iova(&msm_obj->base, submit->gpu->aspace);
if (submit->bos[i].flags & BO_LOCKED)
ww_mutex_unlock(&msm_obj->resv->lock);
kref_put(&aspace->kref, msm_gem_address_space_destroy);
}
-void
-msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+/* Actually unmap memory for the vma */
+void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma)
{
- if (!aspace || !vma->iova)
+ unsigned size = vma->node.size << PAGE_SHIFT;
+
+ /* Warn and bail if we try to purge a vma that is still in use */
+ if (WARN_ON(vma->inuse > 0))
return;
- if (aspace->mmu) {
- unsigned size = vma->node.size << PAGE_SHIFT;
- aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
- }
+ /* Don't do anything if the memory isn't mapped */
+ if (!vma->mapped)
+ return;
- spin_lock(&aspace->lock);
- drm_mm_remove_node(&vma->node);
- spin_unlock(&aspace->lock);
+ if (aspace->mmu)
+ aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
- vma->iova = 0;
vma->mapped = false;
+}
- msm_gem_address_space_put(aspace);
+/* Drop a reference count on the mapping */
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma)
+{
+ if (!WARN_ON(!vma->iova))
+ vma->inuse--;
}
int
if (WARN_ON(!vma->iova))
return -EINVAL;
+ /* Increase the usage counter */
+ vma->inuse++;
+
if (vma->mapped)
return 0;
return ret;
}
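Note that the counter is bumped before the early return for an already-mapped vma, so repeated msm_gem_map_vma() calls nest and each successful call must be balanced by its own msm_gem_unmap_vma().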
+/* Close an iova. Warn if it is still in use */
+void msm_gem_close_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma)
+{
+ if (WARN_ON(vma->inuse > 0 || vma->mapped))
+ return;
+
+ spin_lock(&aspace->lock);
+ if (vma->iova)
+ drm_mm_remove_node(&vma->node);
+ spin_unlock(&aspace->lock);
+
+ vma->iova = 0;
+
+ msm_gem_address_space_put(aspace);
+}
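Putting the four helpers together, a minimal sketch (not driver code) of a vma's life under this split:

static void example_vma_lifecycle(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
{
	msm_gem_init_vma(aspace, vma, npages);		/* reserve an iova range */
	msm_gem_map_vma(aspace, vma, sgt, npages);	/* map the pages, inuse++ */

	/* ... the GPU uses vma->iova while inuse > 0 ... */

	msm_gem_unmap_vma(aspace, vma);	/* inuse--; the mapping stays live */
	msm_gem_purge_vma(aspace, vma);	/* actually unmap once inuse == 0 */
	msm_gem_close_vma(aspace, vma);	/* release the iova and the aspace ref */
}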
+
/* Initialize a new vma and allocate an iova for it */
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, int npages)
return 0;
}
+
struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name)
struct msm_gem_object *msm_obj = submit->bos[i].obj;
/* move to inactive: */
msm_gem_move_to_inactive(&msm_obj->base);
- msm_gem_put_iova(&msm_obj->base, gpu->aspace);
+ msm_gem_unpin_iova(&msm_obj->base, gpu->aspace);
drm_gem_object_put(&msm_obj->base);
}
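The submit and retire paths are now symmetric reference operations: submit pins each buffer (flagging it BO_PINNED), and retire drops that same pin with msm_gem_unpin_iova() once the GPU is done, leaving the actual unmap to purge/close when the object is finally freed or reclaimed.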