csky: use the generic remapping dma alloc implementation
author Christoph Hellwig <hch@lst.de>
Sun, 4 Nov 2018 16:47:44 +0000 (17:47 +0100)
committer Christoph Hellwig <hch@lst.de>
Sat, 1 Dec 2018 17:07:16 +0000 (18:07 +0100)
The csky code was largely copied from arm/arm64, so switch to the
generic arm64-based implementation instead.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Guo Ren <ren_guo@c-sky.com>
arch/csky/Kconfig
arch/csky/mm/dma-mapping.c

index c0cf8e948821f2781e69c9a0909d651b5535c8b3..ea74f3a9eeaf9166ed167d25de9ce673d16e3a61 100644 (file)
@@ -8,7 +8,7 @@ config CSKY
        select CLKSRC_MMIO
        select CLKSRC_OF
        select DMA_DIRECT_OPS
-       select DMA_REMAP
+       select DMA_DIRECT_REMAP
        select IRQ_DOMAIN
        select HANDLE_DOMAIN_IRQ
        select DW_APB_TIMER_OF
index ad4046939713af8794bafb10c82ebe5e77d8be88..80783bb71c5cb8395b3c8baee713b310af89a396 100644 (file)
 #include <linux/version.h>
 #include <asm/cache.h>
 
-static struct gen_pool *atomic_pool;
-static size_t atomic_pool_size __initdata = SZ_256K;
-
-static int __init early_coherent_pool(char *p)
-{
-       atomic_pool_size = memparse(p, &p);
-       return 0;
-}
-early_param("coherent_pool", early_coherent_pool);
-
 static int __init atomic_pool_init(void)
 {
-       struct page *page;
-       size_t size = atomic_pool_size;
-       void *ptr;
-       int ret;
-
-       atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
-       if (!atomic_pool)
-               BUG();
-
-       page = alloc_pages(GFP_KERNEL, get_order(size));
-       if (!page)
-               BUG();
-
-       ptr = dma_common_contiguous_remap(page, size, VM_ALLOC,
-                                         pgprot_noncached(PAGE_KERNEL),
-                                         __builtin_return_address(0));
-       if (!ptr)
-               BUG();
-
-       ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
-                               page_to_phys(page), atomic_pool_size, -1);
-       if (ret)
-               BUG();
-
-       gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL);
-
-       pr_info("DMA: preallocated %zu KiB pool for atomic coherent pool\n",
-               atomic_pool_size / 1024);
-
-       pr_info("DMA: vaddr: 0x%x phy: 0x%lx,\n", (unsigned int)ptr,
-               page_to_phys(page));
-
-       return 0;
+       return dma_atomic_pool_init(GFP_KERNEL, pgprot_noncached(PAGE_KERNEL));
 }
 postcore_initcall(atomic_pool_init);
 
-static void *csky_dma_alloc_atomic(struct device *dev, size_t size,
-                                  dma_addr_t *dma_handle)
-{
-       unsigned long addr;
-
-       addr = gen_pool_alloc(atomic_pool, size);
-       if (addr)
-               *dma_handle = gen_pool_virt_to_phys(atomic_pool, addr);
-
-       return (void *)addr;
-}
-
-static void csky_dma_free_atomic(struct device *dev, size_t size, void *vaddr,
-                                dma_addr_t dma_handle, unsigned long attrs)
-{
-       gen_pool_free(atomic_pool, (unsigned long)vaddr, size);
-}
-
-static void __dma_clear_buffer(struct page *page, size_t size)
+void arch_dma_prep_coherent(struct page *page, size_t size)
 {
        if (PageHighMem(page)) {
                unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -107,84 +47,6 @@ static void __dma_clear_buffer(struct page *page, size_t size)
        }
 }
 
-static void *csky_dma_alloc_nonatomic(struct device *dev, size_t size,
-                                     dma_addr_t *dma_handle, gfp_t gfp,
-                                     unsigned long attrs)
-{
-       void  *vaddr;
-       struct page *page;
-       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-       if (DMA_ATTR_NON_CONSISTENT & attrs) {
-               pr_err("csky %s can't support DMA_ATTR_NON_CONSISTENT.\n", __func__);
-               return NULL;
-       }
-
-       if (IS_ENABLED(CONFIG_DMA_CMA))
-               page = dma_alloc_from_contiguous(dev, count, get_order(size),
-                                                gfp);
-       else
-               page = alloc_pages(gfp, get_order(size));
-
-       if (!page) {
-               pr_err("csky %s no more free pages.\n", __func__);
-               return NULL;
-       }
-
-       *dma_handle = page_to_phys(page);
-
-       __dma_clear_buffer(page, size);
-
-       if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
-               return page;
-
-       vaddr = dma_common_contiguous_remap(page, PAGE_ALIGN(size), VM_USERMAP,
-               pgprot_noncached(PAGE_KERNEL), __builtin_return_address(0));
-       if (!vaddr)
-               BUG();
-
-       return vaddr;
-}
-
-static void csky_dma_free_nonatomic(
-       struct device *dev,
-       size_t size,
-       void *vaddr,
-       dma_addr_t dma_handle,
-       unsigned long attrs
-       )
-{
-       struct page *page = phys_to_page(dma_handle);
-       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-       if ((unsigned int)vaddr >= VMALLOC_START)
-               dma_common_free_remap(vaddr, size, VM_USERMAP);
-
-       if (IS_ENABLED(CONFIG_DMA_CMA))
-               dma_release_from_contiguous(dev, page, count);
-       else
-               __free_pages(page, get_order(size));
-}
-
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                    gfp_t gfp, unsigned long attrs)
-{
-       if (gfpflags_allow_blocking(gfp))
-               return csky_dma_alloc_nonatomic(dev, size, dma_handle, gfp,
-                                               attrs);
-       else
-               return csky_dma_alloc_atomic(dev, size, dma_handle);
-}
-
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
-                  dma_addr_t dma_handle, unsigned long attrs)
-{
-       if (!addr_in_gen_pool(atomic_pool, (unsigned int) vaddr, size))
-               csky_dma_free_nonatomic(dev, size, vaddr, dma_handle, attrs);
-       else
-               csky_dma_free_atomic(dev, size, vaddr, dma_handle, attrs);
-}
-
 static inline void cache_op(phys_addr_t paddr, size_t size,
                            void (*fn)(unsigned long start, unsigned long end))
 {