From c7ca9fe17b84719ef2edbe854e1b0cac04a91e2f Mon Sep 17 00:00:00 2001
From: Max Filippov <jcmvbkbc@gmail.com>
Date: Fri, 9 Oct 2015 02:44:23 +0300
Subject: [PATCH] xtensa: support DMA to high memory

- don't bugcheck if high memory page is passed to xtensa_map_page;
- turn empty dcache flush macros into functions so that they could be
  passed as function parameters;
- use kmap_atomic to map high memory pages for cache invalidation/
  flushing performed by xtensa_sync_single_for_{cpu,device}.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
---
 arch/xtensa/include/asm/cacheflush.h | 11 +++++--
 arch/xtensa/kernel/pci-dma.c         | 45 +++++++++++++++++++---------
 2 files changed, 39 insertions(+), 17 deletions(-)

diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index 7158881771ac..397d6a1a4224 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -55,9 +55,14 @@ extern void __flush_dcache_range(unsigned long, unsigned long);
 extern void __flush_invalidate_dcache_page(unsigned long);
 extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
 #else
-# define __flush_dcache_range(p,s)		do { } while(0)
-# define __flush_dcache_page(p)			do { } while(0)
-# define __flush_invalidate_dcache_page(p)	__invalidate_dcache_page(p)
+static inline void __flush_dcache_page(unsigned long va)
+{
+}
+static inline void __flush_dcache_range(unsigned long va, unsigned long sz)
+{
+}
+# define __flush_invalidate_dcache_all()	__invalidate_dcache_all()
+# define __flush_invalidate_dcache_page(p)	__invalidate_dcache_page(p)
 # define __flush_invalidate_dcache_range(p,s)	__invalidate_dcache_range(p,s)
 #endif
 
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index fb75ebf1463a..cd66698348ca 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -15,14 +15,15 @@
  * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
  */
 
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/pci.h>
 #include <linux/gfp.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
 #include <linux/module.h>
-#include <asm/io.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/types.h>
 #include <asm/cacheflush.h>
+#include <asm/io.h>
 
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		    enum dma_data_direction dir)
@@ -47,17 +48,36 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 }
 EXPORT_SYMBOL(dma_cache_sync);
 
+static void do_cache_op(dma_addr_t dma_handle, size_t size,
+			void (*fn)(unsigned long, unsigned long))
+{
+	unsigned long off = dma_handle & (PAGE_SIZE - 1);
+	unsigned long pfn = PFN_DOWN(dma_handle);
+	struct page *page = pfn_to_page(pfn);
+
+	if (!PageHighMem(page))
+		fn((unsigned long)bus_to_virt(dma_handle), size);
+	else
+		while (size > 0) {
+			size_t sz = min_t(size_t, size, PAGE_SIZE - off);
+			void *vaddr = kmap_atomic(page);
+
+			fn((unsigned long)vaddr + off, sz);
+			kunmap_atomic(vaddr);
+			off = 0;
+			++page;
+			size -= sz;
+		}
+}
+
 static void xtensa_sync_single_for_cpu(struct device *dev,
 				       dma_addr_t dma_handle, size_t size,
 				       enum dma_data_direction dir)
 {
-	void *vaddr;
-
 	switch (dir) {
 	case DMA_BIDIRECTIONAL:
 	case DMA_FROM_DEVICE:
-		vaddr = bus_to_virt(dma_handle);
-		__invalidate_dcache_range((unsigned long)vaddr, size);
+		do_cache_op(dma_handle, size, __invalidate_dcache_range);
 		break;
 
 	case DMA_NONE:
@@ -73,13 +93,11 @@ static void xtensa_sync_single_for_device(struct device *dev,
 				       dma_addr_t dma_handle, size_t size,
 				       enum dma_data_direction dir)
 {
-	void *vaddr;
-
 	switch (dir) {
 	case DMA_BIDIRECTIONAL:
 	case DMA_TO_DEVICE:
-		vaddr = bus_to_virt(dma_handle);
-		__flush_dcache_range((unsigned long)vaddr, size);
+		if (XCHAL_DCACHE_IS_WRITEBACK)
+			do_cache_op(dma_handle, size, __flush_dcache_range);
 		break;
 
 	case DMA_NONE:
@@ -171,7 +189,6 @@ static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
 {
 	dma_addr_t dma_handle = page_to_phys(page) + offset;
 
-	BUG_ON(PageHighMem(page));
 	xtensa_sync_single_for_device(dev, dma_handle, size, dir);
 	return dma_handle;
 }
-- 
2.30.2
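
Note (not part of the patch): below is a minimal, hypothetical sketch of
the driver-side streaming-DMA sequence that this change makes legal for
high memory pages. The device and the helper name example_rx_one_page()
are made up for illustration; the DMA calls are the standard kernel API
that lands in xtensa_map_page() and xtensa_sync_single_for_{cpu,device}.
Before the patch, dma_map_page() on a high memory page tripped
BUG_ON(PageHighMem(page)); with it, do_cache_op() walks the buffer one
page at a time through kmap_atomic().

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/highmem.h>

/* Hypothetical example: receive one page of device data. */
static int example_rx_one_page(struct device *dev)
{
	/* On a highmem config this page may lack a permanent kernel mapping. */
	struct page *page = alloc_page(GFP_HIGHUSER);
	dma_addr_t dma;
	void *buf;

	if (!page)
		return -ENOMEM;

	/* Ends up in xtensa_map_page(), which used to BUG on highmem pages. */
	dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		__free_page(page);
		return -ENOMEM;
	}

	/* ... program the device and wait for the transfer to complete ... */

	/*
	 * Hand the buffer back to the CPU: on xtensa this invalidates the
	 * dcache range, now via do_cache_op()/kmap_atomic() when the page
	 * is in high memory.
	 */
	dma_sync_single_for_cpu(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);

	buf = kmap_atomic(page);
	/* ... consume the received bytes through buf ... */
	kunmap_atomic(buf);

	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);
	__free_page(page);
	return 0;
}

kmap_atomic() is the right primitive here because the dma_sync_*
helpers may be called from contexts that cannot sleep, and the mapping
is only needed for the duration of one cache operation per page.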