dma-mapping: bypass indirect calls for dma-direct
author     Christoph Hellwig <hch@lst.de>
           Thu, 6 Dec 2018 21:39:32 +0000 (13:39 -0800)
committer  Christoph Hellwig <hch@lst.de>
           Thu, 13 Dec 2018 20:06:18 +0000 (21:06 +0100)
Avoid expensive indirect calls in the fast-path DMA mapping
operations by calling the dma_direct_* helpers directly when a
device uses the direct mapping, which is now signalled by a NULL
dma_map_ops pointer.
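
A minimal sketch of the resulting dispatch pattern, with made-up names
(the real dma_is_direct() helper and dma_direct_* calls appear in the
diff below): a NULL ops pointer selects the direct mapping, so the
common case becomes a direct call the compiler can inline and the CPU
can predict, instead of an indirect one:

    struct ops {
            int (*map)(int arg);
    };

    static int direct_map(int arg)      /* stand-in for dma_direct_map_page() */
    {
            return arg;
    }

    static inline int do_map(const struct ops *ops, int arg)
    {
            if (!ops)                       /* dma_is_direct(): likely fast path */
                    return direct_map(arg); /* direct call, can be inlined */
            return ops->map(arg);           /* slow path: indirect call */
    }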

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Tony Luck <tony.luck@intel.com>
20 files changed:
arch/alpha/include/asm/dma-mapping.h
arch/arc/mm/cache.c
arch/arm/include/asm/dma-mapping.h
arch/arm/mm/dma-mapping-nommu.c
arch/arm64/mm/dma-mapping.c
arch/ia64/hp/common/hwsw_iommu.c
arch/ia64/hp/common/sba_iommu.c
arch/ia64/kernel/dma-mapping.c
arch/mips/include/asm/dma-mapping.h
arch/parisc/kernel/setup.c
arch/sparc/include/asm/dma-mapping.h
arch/x86/kernel/pci-dma.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/iommu/amd_iommu.c
include/asm-generic/dma-mapping.h
include/linux/dma-direct.h
include/linux/dma-mapping.h
include/linux/dma-noncoherent.h
kernel/dma/direct.c
kernel/dma/mapping.c

diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h
index 8beeafd4f68e45c8e7e1a6a006719f549cf70ca3..0ee6a5c99b16b555a85eafd156d9e252374b4117 100644
@@ -7,7 +7,7 @@ extern const struct dma_map_ops alpha_pci_ops;
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
 #ifdef CONFIG_ALPHA_JENSEN
-       return &dma_direct_ops;
+       return NULL;
 #else
        return &alpha_pci_ops;
 #endif
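
For reference, the NULL returned here (and by the other get_arch_dma_ops()
implementations below) is consumed by the generic get_dma_ops() helper,
which at this point in the tree reads roughly as follows (simplified
sketch, not part of this patch):

    static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
    {
            if (dev && dev->dma_ops)
                    return dev->dma_ops;
            return get_arch_dma_ops(dev ? dev->bus : NULL);
    }

so a NULL result flows straight into the dma_is_direct() fast-path checks
added in include/linux/dma-mapping.h.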
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index f2701c13a66b209571ff89b71ac6c93cabb9835d..e188bb3ede53ec2c51c60c64f1193d08b0e17501 100644
@@ -1280,7 +1280,7 @@ void __init arc_cache_init_master(void)
        /*
         * In case of IOC (say IOC+SLC case), pointers above could still be set
         * but end up not being relevant as the first function in chain is not
-        * called at all for @dma_direct_ops
+        * called at all for devices using coherent DMA.
         *     arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()
         */
 }
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 965b7c846ecb1c61318ed743342ec85e97ca2e03..31d3b96f0f4b1f6bd73bd7fc675b44f1d7736e5c 100644
@@ -18,7 +18,7 @@ extern const struct dma_map_ops arm_coherent_dma_ops;
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-       return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : &dma_direct_ops;
+       return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : NULL;
 }
 
 #ifdef __arch_page_to_dma
diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c
index 712416ecd8e6c8e6dd721a8247fe5d56d61d8877..f304b10e23a4cbfa9c3c89335dd917418f82c45f 100644
@@ -22,7 +22,7 @@
 #include "dma.h"
 
 /*
- *  dma_direct_ops is used if
+ *  The generic direct mapping code is used if
  *   - MMU/MPU is off
  *   - cpu is v7m w/o cache support
  *   - device is coherent
@@ -209,16 +209,9 @@ const struct dma_map_ops arm_nommu_dma_ops = {
 };
 EXPORT_SYMBOL(arm_nommu_dma_ops);
 
-static const struct dma_map_ops *arm_nommu_get_dma_map_ops(bool coherent)
-{
-       return coherent ? &dma_direct_ops : &arm_nommu_dma_ops;
-}
-
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                        const struct iommu_ops *iommu, bool coherent)
 {
-       const struct dma_map_ops *dma_ops;
-
        if (IS_ENABLED(CONFIG_CPU_V7M)) {
                /*
                 * Cache support for v7m is optional, so can be treated as
@@ -234,7 +227,6 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
        }
 
-       dma_ops = arm_nommu_get_dma_map_ops(dev->archdata.dma_coherent);
-
-       set_dma_ops(dev, dma_ops);
+       if (!dev->archdata.dma_coherent)
+               set_dma_ops(dev, &arm_nommu_dma_ops);
 }
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index ab1e417204d0339fd40a057a7b613a83e4b39416..95eda81e3f2d21ff8991b0a61d7a152fe364fef7 100644
@@ -462,9 +462,6 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                        const struct iommu_ops *iommu, bool coherent)
 {
-       if (!dev->dma_ops)
-               dev->dma_ops = &dma_direct_ops;
-
        dev->dma_coherent = coherent;
        __iommu_setup_dma_ops(dev, dma_base, size, iommu);
 
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index f40ca499b246e05e70ba88cb10ee520ba5eb46af..8840ed97712f2c5798366a8d27d1840568935c1e 100644
@@ -38,7 +38,7 @@ static inline int use_swiotlb(struct device *dev)
 const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
 {
        if (use_swiotlb(dev))
-               return &dma_direct_ops;
+               return NULL;
        return &sba_dma_ops;
 }
 EXPORT_SYMBOL(hwsw_dma_get_ops);
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 5ee74820a0f6d7ffef21ca0bf45b36b6404d7bc2..5a361e51cb1ef822522b46d2d9dc6462a97464de 100644
@@ -2078,7 +2078,7 @@ sba_init(void)
         * a successful kdump kernel boot is to use the swiotlb.
         */
        if (is_kdump_kernel()) {
-               dma_ops = &dma_direct_ops;
+               dma_ops = NULL;
                if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
                        panic("Unable to initialize software I/O TLB:"
                                  " Try machvec=dig boot option");
@@ -2100,7 +2100,7 @@ sba_init(void)
                 * If we didn't find something sba_iommu can claim, we
                 * need to setup the swiotlb and switch to the dig machvec.
                 */
-               dma_ops = &dma_direct_ops;
+               dma_ops = NULL;
                if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
                        panic("Unable to find SBA IOMMU or initialize "
                              "software I/O TLB: Try machvec=dig boot option");
diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
index 80cd3e1ea95a2f3530eb4c40e917813406fce3f8..ad7d9963de34f1f970c000fd82639b49fffbe11e 100644
@@ -36,7 +36,6 @@ long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
 
 void __init swiotlb_dma_init(void)
 {
-       dma_ops = &dma_direct_ops;
        swiotlb_init(1);
 }
 #endif
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 69f914667f3e304349a3d542abba58ae3a2b25ea..20dfaad3a55d2cdea6ad9dc5d2bb8f97b21e860d 100644
@@ -11,7 +11,7 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 #if defined(CONFIG_MACH_JAZZ)
        return &jazz_dma_ops;
 #else
-       return &dma_direct_ops;
+       return NULL;
 #endif
 }
 
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index cd227f1cf629e0c1aa926d0029ad1706910958a6..54818cd78bd09460a42bce194ed636f1abff7809 100644
@@ -99,10 +99,6 @@ void __init dma_ops_init(void)
 
        case pcxl2:
                pa7300lc_init();
-       case pcxl: /* falls through */
-       case pcxs:
-       case pcxt:
-               hppa_dma_ops = &dma_direct_ops;
                break;
        default:
                break;
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 55a44f08a9a45f991114aa3ba50c101491a1c610..ed32845bd2d2f941086626142284785bb0431527 100644
@@ -12,11 +12,11 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
 #ifdef CONFIG_SPARC_LEON
        if (sparc_cpu_model == sparc_leon)
-               return &dma_direct_ops;
+               return NULL;
 #endif
 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
        if (bus == &pci_bus_type)
-               return &dma_direct_ops;
+               return NULL;
 #endif
        return dma_ops;
 }
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index f4562fcec6815baaef955da689b01942d4995347..d460998ae8285141af01b4177617f10a1c944b86 100644
@@ -17,7 +17,7 @@
 
 static bool disable_dac_quirk __read_mostly;
 
-const struct dma_map_ops *dma_ops = &dma_direct_ops;
+const struct dma_map_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
 #ifdef CONFIG_IOMMU_DEBUG
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 61a84b958d671671cb56c37b6e44a589989d026b..50637f372e9f4a24eeac591f49d04698eddac192 100644
@@ -581,7 +581,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 
        dev_priv->map_mode = vmw_dma_map_populate;
 
-       if (dma_ops->sync_single_for_cpu)
+       if (dma_ops && dma_ops->sync_single_for_cpu)
                dev_priv->map_mode = vmw_dma_alloc_coherent;
 #ifdef CONFIG_SWIOTLB
        if (swiotlb_nr_tbl() == 0)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index c5d6c7c42b0a4ce67ecc1e679fff367eaf4546fe..567221cca13c8b8c2389407142c13038d1d70af1 100644
@@ -2184,7 +2184,7 @@ static int amd_iommu_add_device(struct device *dev)
                                dev_name(dev));
 
                iommu_ignore_device(dev);
-               dev->dma_ops = &dma_direct_ops;
+               dev->dma_ops = NULL;
                goto out;
        }
        init_iommu_group(dev);
@@ -2770,17 +2770,6 @@ int __init amd_iommu_init_dma_ops(void)
        swiotlb        = (iommu_pass_through || sme_me_mask) ? 1 : 0;
        iommu_detected = 1;
 
-       /*
-        * In case we don't initialize SWIOTLB (actually the common case
-        * when AMD IOMMU is enabled and SME is not active), make sure there
-        * are global dma_ops set as a fall-back for devices not handled by
-        * this driver (for example non-PCI devices). When SME is active,
-        * make sure that swiotlb variable remains set so the global dma_ops
-        * continue to be SWIOTLB.
-        */
-       if (!swiotlb)
-               dma_ops = &dma_direct_ops;
-
        if (amd_iommu_unmap_flush)
                pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
        else
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
index 880a292d792fbab4f576a57d7dc31f082e28332b..c13f46109e888393124618937ae105a884d3bbbd 100644
@@ -4,7 +4,7 @@
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-       return &dma_direct_ops;
+       return NULL;
 }
 
 #endif /* _ASM_GENERIC_DMA_MAPPING_H */
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 3b0a3ea3876dbf66b540cd2bac6a5ba1a7bc4f18..b7338702592a68638d46d5ba905b33c7b9caa800 100644
@@ -60,22 +60,5 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
 void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page);
-dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
-               unsigned long offset, size_t size, enum dma_data_direction dir,
-               unsigned long attrs);
-void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
-               size_t size, enum dma_data_direction dir, unsigned long attrs);
-int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
-               enum dma_data_direction dir, unsigned long attrs);
-void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
-               int nents, enum dma_data_direction dir, unsigned long attrs);
-void dma_direct_sync_single_for_device(struct device *dev,
-               dma_addr_t addr, size_t size, enum dma_data_direction dir);
-void dma_direct_sync_sg_for_device(struct device *dev,
-               struct scatterlist *sgl, int nents, enum dma_data_direction dir);
-void dma_direct_sync_single_for_cpu(struct device *dev,
-               dma_addr_t addr, size_t size, enum dma_data_direction dir);
-void dma_direct_sync_sg_for_cpu(struct device *dev,
-               struct scatterlist *sgl, int nents, enum dma_data_direction dir);
 int dma_direct_supported(struct device *dev, u64 mask);
 #endif /* _LINUX_DMA_DIRECT_H */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 269ee27fc3d9867a241d134a7e14252929287cba..f422aec0f53cfba6180d942761739cfe7f3e6684 100644
@@ -134,7 +134,6 @@ struct dma_map_ops {
 
 #define DMA_MAPPING_ERROR              (~(dma_addr_t)0)
 
-extern const struct dma_map_ops dma_direct_ops;
 extern const struct dma_map_ops dma_virt_ops;
 extern const struct dma_map_ops dma_dummy_ops;
 
@@ -222,6 +221,69 @@ static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 }
 #endif
 
+static inline bool dma_is_direct(const struct dma_map_ops *ops)
+{
+       return likely(!ops);
+}
+
+/*
+ * All the dma_direct_* declarations are here just for the indirect call bypass,
+ * and must not be used directly by drivers!
+ */
+dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
+               unsigned long offset, size_t size, enum dma_data_direction dir,
+               unsigned long attrs);
+int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
+               enum dma_data_direction dir, unsigned long attrs);
+
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+    defined(CONFIG_SWIOTLB)
+void dma_direct_sync_single_for_device(struct device *dev,
+               dma_addr_t addr, size_t size, enum dma_data_direction dir);
+void dma_direct_sync_sg_for_device(struct device *dev,
+               struct scatterlist *sgl, int nents, enum dma_data_direction dir);
+#else
+static inline void dma_direct_sync_single_for_device(struct device *dev,
+               dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_direct_sync_sg_for_device(struct device *dev,
+               struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+}
+#endif
+
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
+    defined(CONFIG_SWIOTLB)
+void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
+               size_t size, enum dma_data_direction dir, unsigned long attrs);
+void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
+               int nents, enum dma_data_direction dir, unsigned long attrs);
+void dma_direct_sync_single_for_cpu(struct device *dev,
+               dma_addr_t addr, size_t size, enum dma_data_direction dir);
+void dma_direct_sync_sg_for_cpu(struct device *dev,
+               struct scatterlist *sgl, int nents, enum dma_data_direction dir);
+#else
+static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
+               size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+}
+static inline void dma_direct_unmap_sg(struct device *dev,
+               struct scatterlist *sgl, int nents, enum dma_data_direction dir,
+               unsigned long attrs)
+{
+}
+static inline void dma_direct_sync_single_for_cpu(struct device *dev,
+               dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
+               struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+}
+#endif
+
 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
                                              size_t size,
                                              enum dma_data_direction dir,
@@ -232,9 +294,12 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
 
        BUG_ON(!valid_dma_direction(dir));
        debug_dma_map_single(dev, ptr, size);
-       addr = ops->map_page(dev, virt_to_page(ptr),
-                            offset_in_page(ptr), size,
-                            dir, attrs);
+       if (dma_is_direct(ops))
+               addr = dma_direct_map_page(dev, virt_to_page(ptr),
+                               offset_in_page(ptr), size, dir, attrs);
+       else
+               addr = ops->map_page(dev, virt_to_page(ptr),
+                               offset_in_page(ptr), size, dir, attrs);
        debug_dma_map_page(dev, virt_to_page(ptr),
                           offset_in_page(ptr), size,
                           dir, addr, true);
@@ -249,7 +314,9 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
        const struct dma_map_ops *ops = get_dma_ops(dev);
 
        BUG_ON(!valid_dma_direction(dir));
-       if (ops->unmap_page)
+       if (dma_is_direct(ops))
+               dma_direct_unmap_page(dev, addr, size, dir, attrs);
+       else if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, attrs);
        debug_dma_unmap_page(dev, addr, size, dir, true);
 }
@@ -272,7 +339,10 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
        int ents;
 
        BUG_ON(!valid_dma_direction(dir));
-       ents = ops->map_sg(dev, sg, nents, dir, attrs);
+       if (dma_is_direct(ops))
+               ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
+       else
+               ents = ops->map_sg(dev, sg, nents, dir, attrs);
        BUG_ON(ents < 0);
        debug_dma_map_sg(dev, sg, nents, ents, dir);
 
@@ -287,7 +357,9 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
 
        BUG_ON(!valid_dma_direction(dir));
        debug_dma_unmap_sg(dev, sg, nents, dir);
-       if (ops->unmap_sg)
+       if (dma_is_direct(ops))
+               dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
+       else if (ops->unmap_sg)
                ops->unmap_sg(dev, sg, nents, dir, attrs);
 }
 
@@ -301,7 +373,10 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
        dma_addr_t addr;
 
        BUG_ON(!valid_dma_direction(dir));
-       addr = ops->map_page(dev, page, offset, size, dir, attrs);
+       if (dma_is_direct(ops))
+               addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
+       else
+               addr = ops->map_page(dev, page, offset, size, dir, attrs);
        debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 
        return addr;
@@ -322,7 +397,7 @@ static inline dma_addr_t dma_map_resource(struct device *dev,
        BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));
 
        addr = phys_addr;
-       if (ops->map_resource)
+       if (ops && ops->map_resource)
                addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
 
        debug_dma_map_resource(dev, phys_addr, size, dir, addr);
@@ -337,7 +412,7 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
        const struct dma_map_ops *ops = get_dma_ops(dev);
 
        BUG_ON(!valid_dma_direction(dir));
-       if (ops->unmap_resource)
+       if (ops && ops->unmap_resource)
                ops->unmap_resource(dev, addr, size, dir, attrs);
        debug_dma_unmap_resource(dev, addr, size, dir);
 }
@@ -349,7 +424,9 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
        const struct dma_map_ops *ops = get_dma_ops(dev);
 
        BUG_ON(!valid_dma_direction(dir));
-       if (ops->sync_single_for_cpu)
+       if (dma_is_direct(ops))
+               dma_direct_sync_single_for_cpu(dev, addr, size, dir);
+       else if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(dev, addr, size, dir);
        debug_dma_sync_single_for_cpu(dev, addr, size, dir);
 }
@@ -368,7 +445,9 @@ static inline void dma_sync_single_for_device(struct device *dev,
        const struct dma_map_ops *ops = get_dma_ops(dev);
 
        BUG_ON(!valid_dma_direction(dir));
-       if (ops->sync_single_for_device)
+       if (dma_is_direct(ops))
+               dma_direct_sync_single_for_device(dev, addr, size, dir);
+       else if (ops->sync_single_for_device)
                ops->sync_single_for_device(dev, addr, size, dir);
        debug_dma_sync_single_for_device(dev, addr, size, dir);
 }
@@ -387,7 +466,9 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
        const struct dma_map_ops *ops = get_dma_ops(dev);
 
        BUG_ON(!valid_dma_direction(dir));
-       if (ops->sync_sg_for_cpu)
+       if (dma_is_direct(ops))
+               dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
+       else if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
 }
@@ -399,7 +480,9 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
        const struct dma_map_ops *ops = get_dma_ops(dev);
 
        BUG_ON(!valid_dma_direction(dir));
-       if (ops->sync_sg_for_device)
+       if (dma_is_direct(ops))
+               dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
+       else if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
 
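
The #if/#else stubs added above rely on a standard kernel trick: when the
relevant CONFIG_* options are off, the dma_direct_* sync/unmap helpers are
empty inlines, so the dma_is_direct() fast path in each wrapper compiles
away to nothing. A condensed sketch with made-up names:

    struct ops {
            void (*sync)(unsigned long addr, size_t size);
    };

    #ifdef CONFIG_NEEDS_SYNC            /* stand-in for the CONFIG_* tests above */
    void direct_sync(unsigned long addr, size_t size);
    #else
    static inline void direct_sync(unsigned long addr, size_t size)
    {
    }
    #endif

    static inline void api_sync(const struct ops *ops, unsigned long addr,
                                size_t size)
    {
            if (!ops)
                    direct_sync(addr, size); /* folds away when stubbed */
            else if (ops->sync)
                    ops->sync(addr, size);
    }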
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index 306557331d7dafe93a21b8900d74fdcabb21cdb5..69b36ed31a99c6300ad4dd4216ae570b592f2f87 100644
@@ -38,7 +38,10 @@ pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
 void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                enum dma_data_direction direction);
 #else
-#define arch_dma_cache_sync NULL
+static inline void arch_dma_cache_sync(struct device *dev, void *vaddr,
+               size_t size, enum dma_data_direction direction)
+{
+}
 #endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */
 
 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 85d8286a0ba27cf4d4baa26c20ce208df9b47b3a..79da61b49fa4a33065ff3564eb85da742bf38bf6 100644
@@ -223,6 +223,7 @@ void dma_direct_sync_single_for_device(struct device *dev,
        if (!dev_is_dma_coherent(dev))
                arch_sync_dma_for_device(dev, paddr, size, dir);
 }
+EXPORT_SYMBOL(dma_direct_sync_single_for_device);
 
 void dma_direct_sync_sg_for_device(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
@@ -240,6 +241,7 @@ void dma_direct_sync_sg_for_device(struct device *dev,
                                        dir);
        }
 }
+EXPORT_SYMBOL(dma_direct_sync_sg_for_device);
 #endif
 
 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
@@ -258,6 +260,7 @@ void dma_direct_sync_single_for_cpu(struct device *dev,
        if (unlikely(is_swiotlb_buffer(paddr)))
                swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
 }
+EXPORT_SYMBOL(dma_direct_sync_single_for_cpu);
 
 void dma_direct_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
@@ -277,6 +280,7 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
        if (!dev_is_dma_coherent(dev))
                arch_sync_dma_for_cpu_all(dev);
 }
+EXPORT_SYMBOL(dma_direct_sync_sg_for_cpu);
 
 void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
@@ -289,6 +293,7 @@ void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
        if (unlikely(is_swiotlb_buffer(phys)))
                swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
 }
+EXPORT_SYMBOL(dma_direct_unmap_page);
 
 void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction dir, unsigned long attrs)
@@ -300,11 +305,7 @@ void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
                dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
                             attrs);
 }
-#else
-void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
-               int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-}
+EXPORT_SYMBOL(dma_direct_unmap_sg);
 #endif
 
 static inline bool dma_direct_possible(struct device *dev, dma_addr_t dma_addr,
@@ -331,6 +332,7 @@ dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
                arch_sync_dma_for_device(dev, phys, size, dir);
        return dma_addr;
 }
+EXPORT_SYMBOL(dma_direct_map_page);
 
 int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
                enum dma_data_direction dir, unsigned long attrs)
@@ -352,6 +354,7 @@ out_unmap:
        dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
        return 0;
 }
+EXPORT_SYMBOL(dma_direct_map_sg);
 
 /*
  * Because 32-bit DMA masks are so common we expect every architecture to be
@@ -372,27 +375,3 @@ int dma_direct_supported(struct device *dev, u64 mask)
 
        return mask >= phys_to_dma(dev, min_mask);
 }
-
-const struct dma_map_ops dma_direct_ops = {
-       .alloc                  = dma_direct_alloc,
-       .free                   = dma_direct_free,
-       .map_page               = dma_direct_map_page,
-       .map_sg                 = dma_direct_map_sg,
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
-    defined(CONFIG_SWIOTLB)
-       .sync_single_for_device = dma_direct_sync_single_for_device,
-       .sync_sg_for_device     = dma_direct_sync_sg_for_device,
-#endif
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
-    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
-    defined(CONFIG_SWIOTLB)
-       .sync_single_for_cpu    = dma_direct_sync_single_for_cpu,
-       .sync_sg_for_cpu        = dma_direct_sync_sg_for_cpu,
-       .unmap_page             = dma_direct_unmap_page,
-       .unmap_sg               = dma_direct_unmap_sg,
-#endif
-       .get_required_mask      = dma_direct_get_required_mask,
-       .dma_supported          = dma_direct_supported,
-       .cache_sync             = arch_dma_cache_sync,
-};
-EXPORT_SYMBOL(dma_direct_ops);
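
The EXPORT_SYMBOLs added above are needed because the inline wrappers in
include/linux/dma-mapping.h now emit direct references to the dma_direct_*
helpers from arbitrary modules. A hypothetical modular driver illustrates
the effect (example code, not from the patch):

    #include <linux/dma-mapping.h>

    /* dma_map_page() expands inline; for a direct-mapped device it calls
     * dma_direct_map_page() with no indirect call, so the helper must be
     * exported for module builds.
     */
    static int example_map(struct device *dev, struct page *page, size_t len)
    {
            dma_addr_t addr = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);

            if (dma_mapping_error(dev, addr))
                    return -EIO;
            dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
            return 0;
    }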
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 0b18cfbdde95a28f8c75342a3cc27866bb1fbdf9..fc84c81029d904db59b3066cdc205da36972cecd 100644
@@ -7,6 +7,7 @@
  */
 #include <linux/memblock.h> /* for max_pfn */
 #include <linux/acpi.h>
+#include <linux/dma-direct.h>
 #include <linux/dma-noncoherent.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
@@ -229,8 +230,8 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
                unsigned long attrs)
 {
        const struct dma_map_ops *ops = get_dma_ops(dev);
-       BUG_ON(!ops);
-       if (ops->get_sgtable)
+
+       if (!dma_is_direct(ops) && ops->get_sgtable)
                return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
                                        attrs);
        return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
@@ -293,8 +294,8 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                unsigned long attrs)
 {
        const struct dma_map_ops *ops = get_dma_ops(dev);
-       BUG_ON(!ops);
-       if (ops->mmap)
+
+       if (!dma_is_direct(ops) && ops->mmap)
                return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
        return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
 }
@@ -324,6 +325,8 @@ u64 dma_get_required_mask(struct device *dev)
 {
        const struct dma_map_ops *ops = get_dma_ops(dev);
 
+       if (dma_is_direct(ops))
+               return dma_direct_get_required_mask(dev);
        if (ops->get_required_mask)
                return ops->get_required_mask(dev);
        return dma_default_get_required_mask(dev);
@@ -341,7 +344,6 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
        const struct dma_map_ops *ops = get_dma_ops(dev);
        void *cpu_addr;
 
-       BUG_ON(!ops);
        WARN_ON_ONCE(dev && !dev->coherent_dma_mask);
 
        if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
@@ -352,10 +354,14 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 
        if (!arch_dma_alloc_attrs(&dev))
                return NULL;
-       if (!ops->alloc)
+
+       if (dma_is_direct(ops))
+               cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
+       else if (ops->alloc)
+               cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
+       else
                return NULL;
 
-       cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
        return cpu_addr;
 }
@@ -366,8 +372,6 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 {
        const struct dma_map_ops *ops = get_dma_ops(dev);
 
-       BUG_ON(!ops);
-
        if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
                return;
        /*
@@ -379,11 +383,14 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
         */
        WARN_ON(irqs_disabled());
 
-       if (!ops->free || !cpu_addr)
+       if (!cpu_addr)
                return;
 
        debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-       ops->free(dev, size, cpu_addr, dma_handle, attrs);
+       if (dma_is_direct(ops))
+               dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
+       else if (ops->free)
+               ops->free(dev, size, cpu_addr, dma_handle, attrs);
 }
 EXPORT_SYMBOL(dma_free_attrs);
 
@@ -397,9 +404,9 @@ int dma_supported(struct device *dev, u64 mask)
 {
        const struct dma_map_ops *ops = get_dma_ops(dev);
 
-       if (!ops)
-               return 0;
-       if (!ops->dma_supported)
+       if (dma_is_direct(ops))
+               return dma_direct_supported(dev, mask);
+       if (!ops->dma_supported)
                return 1;
        return ops->dma_supported(dev, mask);
 }
@@ -437,7 +444,10 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        const struct dma_map_ops *ops = get_dma_ops(dev);
 
        BUG_ON(!valid_dma_direction(dir));
-       if (ops->cache_sync)
+
+       if (dma_is_direct(ops))
+               arch_dma_cache_sync(dev, vaddr, size, dir);
+       else if (ops->cache_sync)
                ops->cache_sync(dev, vaddr, size, dir);
 }
 EXPORT_SYMBOL(dma_cache_sync);