#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/ctype.h>
+#include <linux/pci.h>
+#include <linux/pci-p2pdma.h>
#include "nvmet.h"
CONFIGFS_ATTR(nvmet_ns_, device_path);
+#ifdef CONFIG_PCI_P2PDMA
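+/*
+ * The "p2pmem" configfs attribute controls whether the namespace may use
+ * PCI peer-to-peer memory for request data buffers. A boolean value
+ * enables or disables automatic provider selection, while a PCI device
+ * name pins the namespace to that provider. Illustrative usage, assuming
+ * the standard nvmet configfs layout:
+ *
+ *   echo 1 > /sys/kernel/config/nvmet/subsystems/<nqn>/namespaces/<nsid>/p2pmem
+ *   echo 0000:04:00.0 > /sys/kernel/config/nvmet/.../namespaces/<nsid>/p2pmem
+ */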
+static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+
+ return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
+}
+
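+/*
+ * Changing the p2pmem setting is only allowed while the namespace is
+ * disabled; otherwise -EBUSY is returned. Any previously pinned provider
+ * reference is dropped before the new value takes effect.
+ */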
+static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ struct pci_dev *p2p_dev = NULL;
+ bool use_p2pmem;
+ int ret = count;
+ int error;
+
+ mutex_lock(&ns->subsys->lock);
+ if (ns->enabled) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
+ if (error) {
+ ret = error;
+ goto out_unlock;
+ }
+
+ ns->use_p2pmem = use_p2pmem;
+ pci_dev_put(ns->p2p_dev);
+ ns->p2p_dev = p2p_dev;
+
+out_unlock:
+ mutex_unlock(&ns->subsys->lock);
+
+ return ret;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, p2pmem);
+#endif /* CONFIG_PCI_P2PDMA */
+
static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
{
return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
&nvmet_ns_attr_ana_grpid,
&nvmet_ns_attr_enable,
&nvmet_ns_attr_buffered_io,
+#ifdef CONFIG_PCI_P2PDMA
+ &nvmet_ns_attr_p2pmem,
+#endif
NULL,
};
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>
+#include <linux/pci-p2pdma.h>
#include "nvmet.h"
nvmet_file_ns_disable(ns);
}
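+/*
+ * Validate that a namespace configured with use_p2pmem can actually use
+ * peer-to-peer memory: it must be backed by a block device whose driver
+ * supports P2P DMA, and either the pinned provider or an automatically
+ * discovered one must be usable with the namespace device.
+ */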
+static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
+{
+ int ret;
+ struct pci_dev *p2p_dev;
+
+ if (!ns->use_p2pmem)
+ return 0;
+
+ if (!ns->bdev) {
+ pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
+ return -EINVAL;
+ }
+
+ if (!blk_queue_pci_p2pdma(ns->bdev->bd_queue)) {
+ pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
+ ns->device_path);
+ return -EINVAL;
+ }
+
+ if (ns->p2p_dev) {
+ ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
+ if (ret < 0)
+ return -EINVAL;
+ } else {
+ /*
+ * Right now we just check that there is p2pmem available so
+ * we can report an error to the user right away if there
+ * is not. We'll find the actual device to use once we
+ * setup the controller when the port's device is available.
+ */
+
+ p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
+ if (!p2p_dev) {
+ pr_err("no peer-to-peer memory is available for %s\n",
+ ns->device_path);
+ return -EINVAL;
+ }
+
+ pci_dev_put(p2p_dev);
+ }
+
+ return 0;
+}
+
+/*
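+ * Pick a p2pmem provider usable by both the controller's port device
+ * (p2p_client) and the namespace, take a reference on it and record it
+ * in ctrl->p2p_ns_map, indexed by nsid.
+ *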
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
+ struct nvmet_ns *ns)
+{
+ struct device *clients[2];
+ struct pci_dev *p2p_dev;
+ int ret;
+
+ if (!ctrl->p2p_client)
+ return;
+
+ if (ns->p2p_dev) {
+ ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
+ if (ret < 0)
+ return;
+
+ p2p_dev = pci_dev_get(ns->p2p_dev);
+ } else {
+ clients[0] = ctrl->p2p_client;
+ clients[1] = nvmet_ns_dev(ns);
+
+ p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
+ if (!p2p_dev) {
+ pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
+ dev_name(ctrl->p2p_client), ns->device_path);
+ return;
+ }
+ }
+
+ ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
+ if (ret < 0) {
+ pci_dev_put(p2p_dev);
+ return;
+ }
+
+ pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
+ ns->nsid);
+}
+
int nvmet_ns_enable(struct nvmet_ns *ns)
{
struct nvmet_subsys *subsys = ns->subsys;
+ struct nvmet_ctrl *ctrl;
int ret;
mutex_lock(&subsys->lock);
if (ret)
goto out_unlock;
+ ret = nvmet_p2pmem_ns_enable(ns);
+ if (ret)
+ goto out_unlock;
+
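+ /* Map the new namespace to a p2pmem device for every existing controller. */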
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ nvmet_p2pmem_ns_add_p2p(ctrl, ns);
+
ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
0, GFP_KERNEL);
if (ret)
mutex_unlock(&subsys->lock);
return ret;
out_dev_put:
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
+
nvmet_ns_dev_disable(ns);
goto out_unlock;
}
void nvmet_ns_disable(struct nvmet_ns *ns)
{
struct nvmet_subsys *subsys = ns->subsys;
+ struct nvmet_ctrl *ctrl;
mutex_lock(&subsys->lock);
if (!ns->enabled)
list_del_rcu(&ns->dev_link);
if (ns->nsid == subsys->max_nsid)
subsys->max_nsid = nvmet_max_nsid(subsys);
+
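+ /* Drop the namespace's p2pmem mapping from every controller. */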
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
+
mutex_unlock(&subsys->lock);
/*
percpu_ref_exit(&ns->ref);
mutex_lock(&subsys->lock);
+
subsys->nr_namespaces--;
nvmet_ns_changed(subsys, ns->nsid);
nvmet_ns_dev_disable(ns);
int nvmet_req_alloc_sgl(struct nvmet_req *req)
{
+ struct pci_dev *p2p_dev = NULL;
+
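+ /*
+ * For commands on I/O queues (qid != 0), try to allocate the data SGL
+ * from the p2pmem device mapped for this namespace; admin commands and
+ * namespaces without a mapped device fall back to regular host memory
+ * below.
+ */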
+ if (IS_ENABLED(CONFIG_PCI_P2PDMA)) {
+ if (req->sq->ctrl && req->ns)
+ p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
+ req->ns->nsid);
+
+ req->p2p_dev = NULL;
+ if (req->sq->qid && p2p_dev) {
+ req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
+ req->transfer_len);
+ if (req->sg) {
+ req->p2p_dev = p2p_dev;
+ return 0;
+ }
+ }
+
+ /*
+ * If no P2P memory was available, we fall back to using
+ * regular memory.
+ */
+ }
+
req->sg = sgl_alloc(req->transfer_len, GFP_KERNEL, &req->sg_cnt);
if (!req->sg)
return -ENOMEM;
void nvmet_req_free_sgl(struct nvmet_req *req)
{
- sgl_free(req->sg);
+ if (req->p2p_dev)
+ pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
+ else
+ sgl_free(req->sg);
+
req->sg = NULL;
req->sg_cnt = 0;
}
return __nvmet_host_allowed(subsys, hostnqn);
}
+/*
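+ * Record the controller's port device as its P2P client and map each
+ * namespace in the subsystem to a usable p2pmem provider.
+ *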
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
+ struct nvmet_req *req)
+{
+ struct nvmet_ns *ns;
+
+ if (!req->p2p_client)
+ return;
+
+ ctrl->p2p_client = get_device(req->p2p_client);
+
+ list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
+ nvmet_p2pmem_ns_add_p2p(ctrl, ns);
+}
+
+/*
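+ * Drop the references held by ctrl->p2p_ns_map and the reference on the
+ * controller's P2P client device.
+ *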
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+
+ radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
+ pci_dev_put(radix_tree_deref_slot(slot));
+
+ put_device(ctrl->p2p_client);
+}
+
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
INIT_LIST_HEAD(&ctrl->async_events);
+ INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
mutex_lock(&subsys->lock);
list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
+ nvmet_setup_p2p_ns_map(ctrl, req);
mutex_unlock(&subsys->lock);
*ctrlp = ctrl;
struct nvmet_subsys *subsys = ctrl->subsys;
mutex_lock(&subsys->lock);
+ nvmet_release_p2p_ns_map(ctrl);
list_del(&ctrl->subsys_entry);
mutex_unlock(&subsys->lock);