int ttm_vt_unlock(struct ttm_lock *lock)
{
return ttm_ref_object_base_unref(lock->vt_holder,
- lock->base.hash.key, TTM_REF_USAGE);
+ lock->base.handle, TTM_REF_USAGE);
}
void ttm_suspend_unlock(struct ttm_lock *lock)
struct dma_buf_ops ops;
void (*dmabuf_release)(struct dma_buf *dma_buf);
size_t dma_buf_size;
+ struct idr idr;
};
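/*
 * The new idr replaces tdev->object_hash as the handle space for base
 * objects: instead of deriving a 31-bit key from the object pointer,
 * handles become small integers handed out by idr_alloc() in
 * ttm_base_object_init() below.
 */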
/**
base->ref_obj_release = ref_obj_release;
base->object_type = object_type;
kref_init(&base->refcount);
+ idr_preload(GFP_KERNEL);
spin_lock(&tdev->object_lock);
- ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
- &base->hash,
- (unsigned long)base, 31, 0, 0);
+ ret = idr_alloc(&tdev->idr, base, 0, 0, GFP_NOWAIT);
spin_unlock(&tdev->object_lock);
- if (unlikely(ret != 0))
- goto out_err0;
+ idr_preload_end();
+ if (ret < 0)
+ return ret;
+ base->handle = ret;
ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
if (unlikely(ret != 0))
goto out_err1;
return 0;
out_err1:
spin_lock(&tdev->object_lock);
- (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+ idr_remove(&tdev->idr, base->handle);
spin_unlock(&tdev->object_lock);
-out_err0:
return ret;
}
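/*
 * The hunk above uses the standard idr pattern for allocating an id while
 * a spinlock is held: preload outside the lock, then allocate with
 * GFP_NOWAIT inside it. A minimal sketch of the pattern, with hypothetical
 * my_idr, my_lock and obj (illustration only, not part of the patch):
 *
 *	idr_preload(GFP_KERNEL);	// may sleep; prefills per-CPU nodes
 *	spin_lock(&my_lock);
 *	id = idr_alloc(&my_idr, obj, 0, 0, GFP_NOWAIT);	// end == 0: no upper bound
 *	spin_unlock(&my_lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		return id;		// -ENOMEM or -ENOSPC
 *	obj->handle = id;
 */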
struct ttm_object_device *tdev = base->tfile->tdev;
spin_lock(&tdev->object_lock);
- (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+ idr_remove(&tdev->idr, base->handle);
spin_unlock(&tdev->object_lock);
/*
struct ttm_base_object *
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
{
- struct ttm_base_object *base = NULL;
- struct drm_hash_item *hash;
- struct drm_open_hash *ht = &tdev->object_hash;
- int ret;
+ struct ttm_base_object *base;
rcu_read_lock();
- ret = drm_ht_find_item_rcu(ht, key, &hash);
+ base = idr_find(&tdev->idr, key);
- if (likely(ret == 0)) {
- base = drm_hash_entry(hash, struct ttm_base_object, hash);
- if (!kref_get_unless_zero(&base->refcount))
- base = NULL;
- }
+ if (base && !kref_get_unless_zero(&base->refcount))
+ base = NULL;
rcu_read_unlock();
return base;
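/*
 * idr_find() may run under rcu_read_lock() because the idr's internal
 * nodes are RCU-freed, and the base object itself stays valid across the
 * critical section because it is released with kfree_rcu() (see
 * ttm_prime_object_kfree below). kref_get_unless_zero() then rejects an
 * object whose last reference is already gone but whose grace period has
 * not yet expired. Sketch of the lookup idiom with hypothetical names
 * (illustration only):
 *
 *	rcu_read_lock();
 *	obj = idr_find(&my_idr, key);
 *	if (obj && !kref_get_unless_zero(&obj->refcount))
 *		obj = NULL;		// dying object; treat as not found
 *	rcu_read_unlock();
 */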
struct ttm_ref_object *ref;
rcu_read_lock();
- if (unlikely(drm_ht_find_item_rcu(ht, base->hash.key, &hash) != 0))
+ if (unlikely(drm_ht_find_item_rcu(ht, base->handle, &hash) != 0))
goto out_false;
/*
while (ret == -EINVAL) {
rcu_read_lock();
- ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);
+ ret = drm_ht_find_item_rcu(ht, base->handle, &hash);
if (ret == 0) {
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
return -ENOMEM;
}
- ref->hash.key = base->hash.key;
+ ref->hash.key = base->handle;
ref->obj = base;
ref->tfile = tfile;
ref->ref_type = ref_type;
if (ret != 0)
goto out_no_object_hash;
+ idr_init(&tdev->idr);
tdev->ops = *ops;
tdev->dmabuf_release = tdev->ops.release;
tdev->ops.release = ttm_prime_dmabuf_release;
*p_tdev = NULL;
+ WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
+ idr_destroy(&tdev->idr);
drm_ht_remove(&tdev->object_hash);
kfree(tdev);
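/*
 * idr_init() in ttm_object_device_init() pairs with idr_destroy() here.
 * idr_destroy() only frees the idr's internal radix-tree nodes, never the
 * objects stored in it, which is why the idr_is_empty() warning above is
 * useful: any surviving entry means a base object leaked its handle before
 * the device went away.
 *
 *	idr_init(&tdev->idr);				// device init
 *	...						// handles come and go
 *	WARN_ON_ONCE(!idr_is_empty(&tdev->idr));	// every handle must be gone
 *	idr_destroy(&tdev->idr);			// frees tree nodes only
 */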
prime = (struct ttm_prime_object *) dma_buf->priv;
base = &prime->base;
- *handle = base->hash.key;
+ *handle = base->handle;
ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
dma_buf_put(dma_buf);
struct ttm_base_object {
struct rcu_head rhead;
- struct drm_hash_item hash;
- enum ttm_object_type object_type;
- bool shareable;
struct ttm_object_file *tfile;
struct kref refcount;
void (*refcount_release) (struct ttm_base_object **base);
void (*ref_obj_release) (struct ttm_base_object *base,
enum ttm_ref_type ref_type);
+ u32 handle;
+ enum ttm_object_type object_type;
+ u32 shareable;
};
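/*
 * @handle, allocated from tdev->idr in ttm_base_object_init(), replaces
 * the old drm_hash_item key as the value handed to user-space and passed
 * back into lookups. Sketch of the round trip (hypothetical variable
 * names, illustration only):
 *
 *	*user_visible_handle = base->handle;
 *	...
 *	base = ttm_base_object_lookup_for_ref(tdev, *user_visible_handle);
 */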
#define ttm_prime_object_kfree(__obj, __prime) \
kfree_rcu(__obj, __prime.base.rhead)
+
+/*
+ * Extra memory required by the base object's idr storage, which is allocated
+ * separately from the base object itself. We estimate an average of 128 bytes
+ * of idr overhead per object handle.
+ */
+#define TTM_OBJ_EXTRA_SIZE 128
#endif
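/*
 * Usage of TTM_OBJ_EXTRA_SIZE: any size a driver accounts against
 * ttm_mem_global_alloc() for a user-visible base object now grows by this
 * estimate, since the idr node backing the handle is allocated outside the
 * object itself. Hedged sketch with a hypothetical object type
 * (illustration only, not part of the patch):
 *
 *	size_t account_size = ttm_round_pot(sizeof(struct foo_user_object)) +
 *			      TTM_OBJ_EXTRA_SIZE;
 *
 *	ret = ttm_mem_global_alloc(glob, account_size, &ctx);
 */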
struct_size = backend_size +
ttm_round_pot(sizeof(struct vmw_buffer_object));
user_struct_size = backend_size +
- ttm_round_pot(sizeof(struct vmw_user_buffer_object));
+ ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
+ TTM_OBJ_EXTRA_SIZE;
}
if (dev_priv->map_mode == vmw_dma_alloc_coherent)
*p_base = &user_bo->prime.base;
kref_get(&(*p_base)->refcount);
}
- *handle = user_bo->prime.base.hash.key;
+ *handle = user_bo->prime.base.handle;
out_no_base_object:
return ret;
user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
- *handle = user_bo->prime.base.hash.key;
+ *handle = user_bo->prime.base.handle;
return ttm_ref_object_add(tfile, &user_bo->prime.base,
TTM_REF_USAGE, NULL, false);
}
return -EINVAL;
}
- /*
- * Approximate idr memory usage with 128 bytes. It will be limited
- * by maximum number_of contexts anyway.
- */
-
if (unlikely(vmw_user_context_size == 0))
- vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
- ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);
+ vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) +
+ ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
+ VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
goto out_err;
}
- arg->cid = ctx->base.hash.key;
+ arg->cid = ctx->base.handle;
out_err:
vmw_resource_unreference(&res);
out_unlock:
INIT_LIST_HEAD(&fman->cleanup_list);
INIT_WORK(&fman->work, &vmw_fence_work_func);
fman->fifo_down = true;
- fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
+ fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
+ TTM_OBJ_EXTRA_SIZE;
fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
fman->event_fence_action_size =
ttm_round_pot(sizeof(struct vmw_event_fence_action));
}
*p_fence = &ufence->fence;
- *p_handle = ufence->base.hash.key;
+ *p_handle = ufence->base.handle;
return 0;
out_err:
"object.\n");
goto out_no_ref_obj;
}
- handle = base->hash.key;
+ handle = base->handle;
}
ttm_base_object_unref(&base);
}
#include "vmwgfx_drv.h"
+/*
+ * Extra memory required by the resource id's ida storage, which is allocated
+ * separately from the resource itself. We estimate an average of 128 bytes
+ * of ida overhead per resource id.
+ */
#define VMW_IDA_ACC_SIZE 128
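/*
 * VMW_IDA_ACC_SIZE covers the ida node behind a resource id, while
 * TTM_OBJ_EXTRA_SIZE covers the idr node behind the TTM base object
 * handle. User-visible resources therefore account for both, e.g.
 * (hedged sketch mirroring the shader changes below):
 *
 *	vmw_user_shader_size = ttm_round_pot(sizeof(struct vmw_user_shader)) +
 *			       VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
 *
 * whereas a kernel-internal resource such as struct vmw_shader adds only
 * VMW_IDA_ACC_SIZE.
 */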
enum vmw_cmdbuf_res_state {
};
int ret;
- /*
- * Approximate idr memory usage with 128 bytes. It will be limited
- * by maximum number_of shaders anyway.
- */
if (unlikely(vmw_user_shader_size == 0))
vmw_user_shader_size =
- ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;
+ ttm_round_pot(sizeof(struct vmw_user_shader)) +
+ VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_user_shader_size,
}
if (handle)
- *handle = ushader->base.hash.key;
+ *handle = ushader->base.handle;
out_err:
vmw_resource_unreference(&res);
out:
};
int ret;
- /*
- * Approximate idr memory usage with 128 bytes. It will be limited
- * by maximum number_of shaders anyway.
- */
if (unlikely(vmw_shader_size == 0))
vmw_shader_size =
- ttm_round_pot(sizeof(struct vmw_shader)) + 128;
+ ttm_round_pot(sizeof(struct vmw_shader)) +
+ VMW_IDA_ACC_SIZE;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_shader_size,
alloc_size = offsetof(struct vmw_user_simple_resource, simple) +
func->size;
- account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE;
+ account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE +
+ TTM_OBJ_EXTRA_SIZE;
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (ret)
goto out_err;
}
- func->set_arg_handle(data, usimple->base.hash.key);
+ func->set_arg_handle(data, usimple->base.handle);
out_err:
vmw_resource_unreference(&res);
out_ret:
if (unlikely(vmw_user_surface_size == 0))
vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
- 128;
+ VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
num_sizes = 0;
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
num_sizes == 0)
return -EINVAL;
- size = vmw_user_surface_size + 128 +
+ size = vmw_user_surface_size +
ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
goto out_unlock;
}
- rep->sid = user_srf->prime.base.hash.key;
+ rep->sid = user_srf->prime.base.handle;
vmw_resource_unreference(&res);
ttm_read_unlock(&dev_priv->reservation_sem);
if (unlikely(ret != 0)) {
DRM_ERROR("copy_to_user failed %p %u\n",
user_sizes, srf->num_sizes);
- ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
+ ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE);
ret = -EFAULT;
}
if (unlikely(vmw_user_surface_size == 0))
vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
- 128;
+ VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
- size = vmw_user_surface_size + 128;
+ size = vmw_user_surface_size;
/* Define a surface based on the parameters. */
ret = vmw_surface_gb_priv_define(dev,
goto out_unlock;
}
- rep->handle = user_srf->prime.base.hash.key;
+ rep->handle = user_srf->prime.base.handle;
rep->backup_size = res->backup_size;
if (res->backup) {
rep->buffer_map_handle =
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a GB surface "
"backup buffer.\n");
- (void) ttm_ref_object_base_unref(tfile, base->hash.key,
+ (void) ttm_ref_object_base_unref(tfile, base->handle,
TTM_REF_USAGE);
goto out_bad_resource;
}
rep->creq.base.array_size = srf->array_size;
rep->creq.base.buffer_handle = backup_handle;
rep->creq.base.base_size = srf->base_size;
- rep->crep.handle = user_srf->prime.base.hash.key;
+ rep->crep.handle = user_srf->prime.base.handle;
rep->crep.backup_size = srf->res.backup_size;
rep->crep.buffer_handle = backup_handle;
rep->crep.buffer_map_handle =