 	spin_lock(&dev_list_lock);
 	list_for_each_entry(dev, &dev_list, node) {
 		if (dev->instance == instance) {
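+			/* async probe hasn't created the admin queue yet; fail the open */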
+			if (!dev->admin_q) {
+				ret = -EWOULDBLOCK;
+				break;
+			}
 			if (!kref_get_unless_zero(&dev->kref))
 				break;
 			f->private_data = dev;
 			ret = 0;
 			break;
 		}
 	}
 	spin_unlock(&dev_list_lock);
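
+/* defined below, after nvme_probe(), hence the forward declaration */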
+static void nvme_async_probe(struct work_struct *work);
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	int node, result = -ENOMEM;

 	result = nvme_setup_prp_pools(dev);
 	if (result)
 		goto release;

 	kref_init(&dev->kref);
-	result = nvme_dev_start(dev);
-	if (result)
-		goto release_pools;
-
 	dev->device = device_create(nvme_class, &pdev->dev,
 				MKDEV(nvme_char_major, dev->instance),
 				dev, "nvme%d", dev->instance);
 	if (IS_ERR(dev->device)) {
 		result = PTR_ERR(dev->device);
-		goto shutdown;
+		goto release_pools;
 	}
 	get_device(dev->device);

-	if (dev->online_queues > 1)
-		result = nvme_dev_add(dev);
-	if (result)
-		goto device_del;
-
-	nvme_set_irq_hints(dev);
-	dev->initialized = 1;
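+	/* defer the time-consuming initialization to scheduled work */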
+	INIT_WORK(&dev->probe_work, nvme_async_probe);
+	schedule_work(&dev->probe_work);
 	return 0;

- device_del:
-	device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance));
- shutdown:
-	nvme_dev_shutdown(dev);
  release_pools:
-	nvme_free_queues(dev, 0);
 	nvme_release_prp_pools(dev);
  release:
 	nvme_release_instance(dev);
 	return result;
 }
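
+/*
+ * Long-running parts of controller initialization, run from scheduled
+ * work so that PCI probe can return quickly.
+ */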
+static void nvme_async_probe(struct work_struct *work)
+{
+	struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work);
+	int result;
+
+	result = nvme_dev_start(dev);
+	if (result)
+		goto reset;
+
+	if (dev->online_queues > 1)
+		result = nvme_dev_add(dev);
+	if (result)
+		goto reset;
+
+	nvme_set_irq_hints(dev);
+	dev->initialized = 1;
+	return;
+ reset:
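+	/* initialization failed; let the reset handler recover the device */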
+	dev->reset_workfn = nvme_reset_failed_dev;
+	queue_work(nvme_workq, &dev->reset_work);
+}
+
 static void nvme_remove(struct pci_dev *pdev)
 {
 	struct nvme_dev *dev = pci_get_drvdata(pdev);

 	spin_lock(&dev_list_lock);
 	list_del_init(&dev->node);
 	spin_unlock(&dev_list_lock);

 	pci_set_drvdata(pdev, NULL);
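+	/* ensure the async probe has finished before tearing the device down */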
+	flush_work(&dev->probe_work);
 	flush_work(&dev->reset_work);
 	nvme_dev_shutdown(dev);
 	nvme_dev_remove(dev);