uobj->object = NULL;
- mutex_lock(&ufile->uobjects_lock);
+ spin_lock_irq(&ufile->uobjects_lock);
list_del(&uobj->list);
- mutex_unlock(&ufile->uobjects_lock);
+ spin_unlock_irq(&ufile->uobjects_lock);
/* Pairs with the get in rdma_alloc_commit_uobject() */
uverbs_uobject_put(uobj);
struct ib_uverbs_file *ufile = uobject->ufile;
/* Cleanup is running. Calling this should have been impossible */
- if (!down_read_trylock(&ufile->cleanup_rwsem)) {
+ if (!down_read_trylock(&ufile->hw_destroy_rwsem)) {
WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
return 0;
}
assert_uverbs_usecnt(uobject, true);
ret = _rdma_remove_commit_uobject(uobject, RDMA_REMOVE_DESTROY);
- up_read(&ufile->cleanup_rwsem);
+ up_read(&ufile->hw_destroy_rwsem);
return ret;
}
struct ib_uverbs_file *ufile = uobj->ufile;
/* Cleanup is running. Calling this should have been impossible */
- if (!down_read_trylock(&ufile->cleanup_rwsem)) {
+ if (!down_read_trylock(&ufile->hw_destroy_rwsem)) {
int ret;
WARN(true, "ib_uverbs: Cleanup is running while allocating an uobject\n");
/* kref is held so long as the uobj is on the uobj list. */
uverbs_uobject_get(uobj);
- mutex_lock(&ufile->uobjects_lock);
+ spin_lock_irq(&ufile->uobjects_lock);
list_add(&uobj->list, &ufile->uobjects);
- mutex_unlock(&ufile->uobjects_lock);
+ spin_unlock_irq(&ufile->uobjects_lock);
/* alloc_commit consumes the uobj kref */
uobj->type->type_class->alloc_commit(uobj);
- up_read(&ufile->cleanup_rwsem);
+ up_read(&ufile->hw_destroy_rwsem);
return 0;
}
struct ib_uobject *uobj = f->private_data;
struct ib_uverbs_file *ufile = uobj->ufile;
- if (down_read_trylock(&ufile->cleanup_rwsem)) {
+ if (down_read_trylock(&ufile->hw_destroy_rwsem)) {
_uverbs_close_fd(uobj);
- up_read(&ufile->cleanup_rwsem);
+ up_read(&ufile->hw_destroy_rwsem);
}
uobj->object = NULL;
* We take and release the lock per traversal in order to let
* other threads (which might still use the FDs) chance to run.
*/
- mutex_lock(&ufile->uobjects_lock);
ufile->cleanup_reason = reason;
list_for_each_entry_safe(obj, next_obj, &ufile->uobjects, list) {
/*
uverbs_uobject_put(obj);
ret = 0;
}
- mutex_unlock(&ufile->uobjects_lock);
return ret;
}
* want to hold this forever as the context is going to be destroyed,
* but we'll release it since it causes a "held lock freed" BUG message.
*/
- down_write(&ufile->cleanup_rwsem);
+ down_write(&ufile->hw_destroy_rwsem);
ufile->ucontext->cleanup_retryable = true;
while (!list_empty(&ufile->uobjects))
if (__uverbs_cleanup_ufile(ufile, reason)) {
if (!list_empty(&ufile->uobjects))
__uverbs_cleanup_ufile(ufile, reason);
- up_write(&ufile->cleanup_rwsem);
+ up_write(&ufile->hw_destroy_rwsem);
}
const struct uverbs_obj_type_class uverbs_fd_class = {
struct list_head list;
int is_closed;
- /* locking the uobjects_list */
- struct mutex uobjects_lock;
+ /*
+ * To access the uobjects list hw_destroy_rwsem must be held for write
+ * OR hw_destroy_rwsem held for read AND uobjects_lock held.
+ * hw_destroy_rwsem should be held across any destruction of the HW
+ * object of an associated uobject.
+ */
+ struct rw_semaphore hw_destroy_rwsem;
+ spinlock_t uobjects_lock;
struct list_head uobjects;
- /* protects cleanup process from other actions */
- struct rw_semaphore cleanup_rwsem;
enum rdma_remove_reason cleanup_reason;
struct idr idr;