nvme-pci: use dma memory for the host memory buffer descriptors
author Christoph Hellwig <hch@lst.de>
Mon, 28 Aug 2017 08:47:18 +0000 (10:47 +0200)
committer Christoph Hellwig <hch@lst.de>
Wed, 30 Aug 2017 12:46:16 +0000 (14:46 +0200)
The NVMe 1.3 specification says in section 5.21.1.13:

"After a successful completion of a Set Features enabling the host memory
 buffer, the host shall not write to the associated host memory region,
 buffer size, or descriptor list until the host memory buffer has been
 disabled."

While this doesn't state that the descriptor list must remain accessible
to the device, it certainly implies it must remain readable by the device.

So switch to a DMA coherent allocation for the descriptor list just to be
safe - the cost is negligible compared to the actual memory buffers.
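
For illustration, a minimal sketch of the two lifetimes (reusing names from
the patch below; not the driver's exact code).  A streaming mapping is only
guaranteed to be device-visible between map and unmap, whereas a coherent
allocation stays device-readable from allocation until it is freed, which
matches the lifetime of the host memory buffer:

	struct nvme_host_mem_buf_desc *descs;
	dma_addr_t descs_dma;
	size_t len = max_entries * sizeof(*descs);

	/*
	 * Old scheme (streaming DMA, descs allocated with kcalloc()):
	 * the device may only rely on the buffer between map and unmap,
	 * so unmapping right after the Set Features command completes
	 * leaves the descriptor list unreadable while the host memory
	 * buffer is still enabled.
	 */
	descs_dma = dma_map_single(dev->dev, descs, len, DMA_TO_DEVICE);
	/* ... issue Set Features ... */
	dma_unmap_single(dev->dev, descs_dma, len, DMA_TO_DEVICE);

	/*
	 * New scheme (coherent DMA): the mapping persists until
	 * dma_free_coherent(), i.e. until the buffer has been disabled
	 * and is torn down.
	 */
	descs = dma_zalloc_coherent(dev->dev, len, &descs_dma, GFP_KERNEL);
	/* ... issue Set Features; keep the mapping in place ... */
	dma_free_coherent(dev->dev, len, descs, descs_dma);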

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Fixes: 87ad72a59a38 ("nvme-pci: implement host memory buffer support")
drivers/nvme/host/pci.c

index 925467b31a333940dc62d62c4c5f3376316e4120..ea892e732268fe37bd4ea0c52b6d89fa055b691d 100644
@@ -109,6 +109,7 @@ struct nvme_dev {
        /* host memory buffer support: */
        u64 host_mem_size;
        u32 nr_host_mem_descs;
+       dma_addr_t host_mem_descs_dma;
        struct nvme_host_mem_buf_desc *host_mem_descs;
        void **host_mem_desc_bufs;
 };
@@ -1565,16 +1566,10 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
 
 static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
 {
-       size_t len = dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs);
+       u64 dma_addr = dev->host_mem_descs_dma;
        struct nvme_command c;
-       u64 dma_addr;
        int ret;
 
-       dma_addr = dma_map_single(dev->dev, dev->host_mem_descs, len,
-                       DMA_TO_DEVICE);
-       if (dma_mapping_error(dev->dev, dma_addr))
-               return -ENOMEM;
-
        memset(&c, 0, sizeof(c));
        c.features.opcode       = nvme_admin_set_features;
        c.features.fid          = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
@@ -1591,7 +1586,6 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
                         "failed to set host mem (err %d, flags %#x).\n",
                         ret, bits);
        }
-       dma_unmap_single(dev->dev, dma_addr, len, DMA_TO_DEVICE);
        return ret;
 }
 
@@ -1609,7 +1603,9 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
 
        kfree(dev->host_mem_desc_bufs);
        dev->host_mem_desc_bufs = NULL;
-       kfree(dev->host_mem_descs);
+       dma_free_coherent(dev->dev,
+                       dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
+                       dev->host_mem_descs, dev->host_mem_descs_dma);
        dev->host_mem_descs = NULL;
 }
 
@@ -1617,6 +1613,7 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
 {
        struct nvme_host_mem_buf_desc *descs;
        u32 chunk_size, max_entries, len;
+       dma_addr_t descs_dma;
        int i = 0;
        void **bufs;
        u64 size = 0, tmp;
@@ -1627,7 +1624,8 @@ retry:
        tmp = (preferred + chunk_size - 1);
        do_div(tmp, chunk_size);
        max_entries = tmp;
-       descs = kcalloc(max_entries, sizeof(*descs), GFP_KERNEL);
+       descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs),
+                       &descs_dma, GFP_KERNEL);
        if (!descs)
                goto out;
 
@@ -1661,6 +1659,7 @@ retry:
        dev->nr_host_mem_descs = i;
        dev->host_mem_size = size;
        dev->host_mem_descs = descs;
+       dev->host_mem_descs_dma = descs_dma;
        dev->host_mem_desc_bufs = bufs;
        return 0;
 
@@ -1674,7 +1673,8 @@ out_free_bufs:
 
        kfree(bufs);
 out_free_descs:
-       kfree(descs);
+       dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
+                       descs_dma);
 out:
        /* try a smaller chunk size if we failed early */
        if (chunk_size >= PAGE_SIZE * 2 && (i == 0 || size < min)) {