struct nvme_ns *ns, *next;
LIST_HEAD(rm_list);
- mutex_lock(&ctrl->namespaces_mutex);
+ down_write(&ctrl->namespaces_rwsem);
list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
if (ns->disk && nvme_revalidate_disk(ns->disk)) {
list_move_tail(&ns->list, &rm_list);
}
}
- mutex_unlock(&ctrl->namespaces_mutex);
+ up_write(&ctrl->namespaces_rwsem);
list_for_each_entry_safe(ns, next, &rm_list, list)
nvme_ns_remove(ns);
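
The hunk above (and the similar one for dropping out-of-range namespaces further down) follows a collect-then-remove pattern: entries are moved to a private rm_list while the semaphore is held for writing, and nvme_ns_remove() is only called after the lock is dropped, since nvme_ns_remove() itself takes namespaces_rwsem for writing (see its hunk below) and does blocking teardown. The following is a minimal sketch of that pattern only; the demo_* names are invented for illustration and are not part of the patch.

/*
 * Sketch only -- not driver code.  Mutate the shared list while holding the
 * semaphore for writing, then do the slow teardown on a private list with
 * no lock held.
 */
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_entry {
	struct list_head list;
	bool stale;
};

struct demo_ctrl {
	struct rw_semaphore entries_rwsem;
	struct list_head entries;
};

static void demo_prune_stale(struct demo_ctrl *ctrl)
{
	struct demo_entry *e, *next;
	LIST_HEAD(rm_list);

	down_write(&ctrl->entries_rwsem);
	list_for_each_entry_safe(e, next, &ctrl->entries, list) {
		if (e->stale)
			list_move_tail(&e->list, &rm_list);
	}
	up_write(&ctrl->entries_rwsem);

	/* teardown may sleep or retake the semaphore; the lock is released */
	list_for_each_entry_safe(e, next, &rm_list, list) {
		list_del(&e->list);
		kfree(e);
	}
}
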
struct nvme_ns *ns;
int ret;
- mutex_lock(&ctrl->namespaces_mutex);
+ down_read(&ctrl->namespaces_rwsem);
if (list_empty(&ctrl->namespaces)) {
	ret = -ENOTTY;
	goto out_unlock;
}

ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
dev_warn(ctrl->device,
	"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
kref_get(&ns->kref);
- mutex_unlock(&ctrl->namespaces_mutex);
+ up_read(&ctrl->namespaces_rwsem);
ret = nvme_user_cmd(ctrl, ns, argp);
nvme_put_ns(ns);
return ret;
out_unlock:
- mutex_unlock(&ctrl->namespaces_mutex);
+ up_read(&ctrl->namespaces_rwsem);
return ret;
}
{
struct nvme_ns *ns, *ret = NULL;
- mutex_lock(&ctrl->namespaces_mutex);
+ down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list) {
if (ns->head->ns_id == nsid) {
	if (!kref_get_unless_zero(&ns->kref))
		continue;
	ret = ns;
	break;
}
if (ns->head->ns_id > nsid)
	break;
}
- mutex_unlock(&ctrl->namespaces_mutex);
+ up_read(&ctrl->namespaces_rwsem);
return ret;
}
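
nvme_find_get_ns() only needs the list for the duration of the lookup, so it takes the semaphore for reading and pins the namespace with kref_get_unless_zero() before up_read(); the caller then holds a reference that outlives the lock. Below is a minimal sketch of the same lookup-and-pin pattern together with the matching put side. The demo_* names are invented and the "sorted by id" early exit mirrors the list_sort()-maintained ordering used elsewhere in this patch; none of this is driver code.

/* Sketch only -- not driver code. */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/slab.h>

struct demo_obj {
	struct list_head list;
	struct kref kref;
	unsigned int id;
};

struct demo_ctrl {
	struct rw_semaphore entries_rwsem;
	struct list_head entries;	/* kept sorted by id */
};

static struct demo_obj *demo_find_get(struct demo_ctrl *ctrl, unsigned int id)
{
	struct demo_obj *obj, *ret = NULL;

	down_read(&ctrl->entries_rwsem);
	list_for_each_entry(obj, &ctrl->entries, list) {
		if (obj->id == id) {
			/* skip objects whose last reference is already gone */
			if (!kref_get_unless_zero(&obj->kref))
				continue;
			ret = obj;
			break;
		}
		if (obj->id > id)
			break;	/* list is sorted; no match */
	}
	up_read(&ctrl->entries_rwsem);
	return ret;
}

static void demo_release(struct kref *kref)
{
	struct demo_obj *obj = container_of(kref, struct demo_obj, kref);

	kfree(obj);
}

static void demo_put(struct demo_obj *obj)
{
	if (obj)
		kref_put(&obj->kref, demo_release);
}
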
__nvme_revalidate_disk(disk, id);
- mutex_lock(&ctrl->namespaces_mutex);
+ down_write(&ctrl->namespaces_rwsem);
list_add_tail(&ns->list, &ctrl->namespaces);
- mutex_unlock(&ctrl->namespaces_mutex);
+ up_write(&ctrl->namespaces_rwsem);
nvme_get_ctrl(ctrl);
list_del_rcu(&ns->siblings);
mutex_unlock(&ns->ctrl->subsys->lock);
- mutex_lock(&ns->ctrl->namespaces_mutex);
+ down_write(&ns->ctrl->namespaces_rwsem);
list_del_init(&ns->list);
- mutex_unlock(&ns->ctrl->namespaces_mutex);
+ up_write(&ns->ctrl->namespaces_rwsem);
synchronize_srcu(&ns->head->srcu);
nvme_mpath_check_last_path(ns);
struct nvme_ns *ns, *next;
LIST_HEAD(rm_list);
- mutex_lock(&ctrl->namespaces_mutex);
+ down_write(&ctrl->namespaces_rwsem);
list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
if (ns->head->ns_id > nsid)
list_move_tail(&ns->list, &rm_list);
}
- mutex_unlock(&ctrl->namespaces_mutex);
+ up_write(&ctrl->namespaces_rwsem);
list_for_each_entry_safe(ns, next, &rm_list, list)
nvme_ns_remove(ns);
}
nvme_scan_ns_sequential(ctrl, nn);
done:
- mutex_lock(&ctrl->namespaces_mutex);
+ down_write(&ctrl->namespaces_rwsem);
list_sort(NULL, &ctrl->namespaces, ns_cmp);
- mutex_unlock(&ctrl->namespaces_mutex);
+ up_write(&ctrl->namespaces_rwsem);
kfree(id);
}
if (ctrl->state == NVME_CTRL_DEAD)
nvme_kill_queues(ctrl);
- mutex_lock(&ctrl->namespaces_mutex);
+ down_write(&ctrl->namespaces_rwsem);
list_splice_init(&ctrl->namespaces, &ns_list);
- mutex_unlock(&ctrl->namespaces_mutex);
+ up_write(&ctrl->namespaces_rwsem);
list_for_each_entry_safe(ns, next, &ns_list, list)
nvme_ns_remove(ns);
ctrl->state = NVME_CTRL_NEW;
spin_lock_init(&ctrl->lock);
INIT_LIST_HEAD(&ctrl->namespaces);
- mutex_init(&ctrl->namespaces_mutex);
+ init_rwsem(&ctrl->namespaces_rwsem);
ctrl->dev = dev;
ctrl->ops = ops;
ctrl->quirks = quirks;
{
struct nvme_ns *ns;
- mutex_lock(&ctrl->namespaces_mutex);
+ down_read(&ctrl->namespaces_rwsem);
/* Forcibly unquiesce queues to avoid blocking dispatch */
if (ctrl->admin_q)
	blk_mq_unquiesce_queue(ctrl->admin_q);

list_for_each_entry(ns, &ctrl->namespaces, list) {
	/* Forcibly unquiesce queues to avoid blocking dispatch */
	blk_mq_unquiesce_queue(ns->queue);
}
- mutex_unlock(&ctrl->namespaces_mutex);
+ up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);
{
struct nvme_ns *ns;
- mutex_lock(&ctrl->namespaces_mutex);
+ down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list)
blk_mq_unfreeze_queue(ns->queue);
- mutex_unlock(&ctrl->namespaces_mutex);
+ up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);
{
struct nvme_ns *ns;
- mutex_lock(&ctrl->namespaces_mutex);
+ down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list) {
timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
if (timeout <= 0)
break;
}
- mutex_unlock(&ctrl->namespaces_mutex);
+ up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
{
struct nvme_ns *ns;
- mutex_lock(&ctrl->namespaces_mutex);
+ down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list)
blk_mq_freeze_queue_wait(ns->queue);
- mutex_unlock(&ctrl->namespaces_mutex);
+ up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);
{
struct nvme_ns *ns;
- mutex_lock(&ctrl->namespaces_mutex);
+ down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list)
blk_freeze_queue_start(ns->queue);
- mutex_unlock(&ctrl->namespaces_mutex);
+ up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);
{
struct nvme_ns *ns;
- mutex_lock(&ctrl->namespaces_mutex);
+ down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list)
blk_mq_quiesce_queue(ns->queue);
- mutex_unlock(&ctrl->namespaces_mutex);
+ up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);
{
struct nvme_ns *ns;
- mutex_lock(&ctrl->namespaces_mutex);
+ down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list)
blk_mq_unquiesce_queue(ns->queue);
- mutex_unlock(&ctrl->namespaces_mutex);
+ up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);
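
All of the helpers converted at the end of the patch (nvme_kill_queues, nvme_unfreeze, nvme_wait_freeze_timeout, nvme_wait_freeze, nvme_start_freeze, nvme_stop_queues, nvme_start_queues) only traverse ctrl->namespaces, so they now take the semaphore for reading and can run concurrently with one another; only paths that add or delete list entries, such as nvme_alloc_ns() and nvme_ns_remove(), need down_write(). A minimal sketch of that read/write split, with invented demo_* names (not driver code):

/* Sketch only -- not driver code. */
#include <linux/list.h>
#include <linux/rwsem.h>

struct demo_obj {
	struct list_head list;
};

struct demo_ctrl {
	struct rw_semaphore entries_rwsem;
	struct list_head entries;
};

static void demo_ctrl_init(struct demo_ctrl *ctrl)
{
	INIT_LIST_HEAD(&ctrl->entries);
	init_rwsem(&ctrl->entries_rwsem);
}

/* read side: any number of walkers may hold the semaphore at once */
static void demo_for_each(struct demo_ctrl *ctrl,
			  void (*fn)(struct demo_obj *obj))
{
	struct demo_obj *obj;

	down_read(&ctrl->entries_rwsem);
	list_for_each_entry(obj, &ctrl->entries, list)
		fn(obj);
	up_read(&ctrl->entries_rwsem);
}

/* write side: adding or deleting entries still excludes all readers */
static void demo_add(struct demo_ctrl *ctrl, struct demo_obj *obj)
{
	down_write(&ctrl->entries_rwsem);
	list_add_tail(&obj->list, &ctrl->entries);
	up_write(&ctrl->entries_rwsem);
}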