From: Arnd Bergmann <a...@arndb.de>

The powerpc arch_sync_dma_for_device()/arch_sync_dma_for_cpu() functions
behave differently from all other architectures, at least for some of
the operations.

As a preparation for making the behavior more consistent, reorder the
logic in which they decide whether to flush, invalidate or clean the
caches. No change in behavior is intended.
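
For reference, the direction-to-cache-operation mapping that both
arch_sync_dma_for_device() and arch_sync_dma_for_cpu() preserve is:

  DMA_FROM_DEVICE     invalidate, or flush when the range is not
                      cache-line aligned
  DMA_TO_DEVICE       clean (writeback only)
  DMA_BIDIRECTIONAL   flush (writeback and invalidate)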

Signed-off-by: Arnd Bergmann <a...@arndb.de>
---
 arch/powerpc/mm/dma-noncoherent.c | 91 +++++++++++++++++++++----------
 1 file changed, 63 insertions(+), 28 deletions(-)

diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index 30260b5d146d..f10869d27de5 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -16,31 +16,28 @@
 #include <asm/tlbflush.h>
 #include <asm/dma.h>
 
+enum dma_cache_op {
+       DMA_CACHE_CLEAN,
+       DMA_CACHE_INVAL,
+       DMA_CACHE_FLUSH,
+};
+
 /*
  * make an area consistent.
  */
-static void __dma_sync(void *vaddr, size_t size, int direction)
+static void __dma_op(void *vaddr, size_t size, enum dma_cache_op op)
 {
        unsigned long start = (unsigned long)vaddr;
        unsigned long end   = start + size;
 
-       switch (direction) {
-       case DMA_NONE:
-               BUG();
-       case DMA_FROM_DEVICE:
-               /*
-                * invalidate only when cache-line aligned otherwise there is
-                * the potential for discarding uncommitted data from the cache
-                */
-               if ((start | end) & (L1_CACHE_BYTES - 1))
-                       flush_dcache_range(start, end);
-               else
-                       invalidate_dcache_range(start, end);
-               break;
-       case DMA_TO_DEVICE:             /* writeback only */
+       switch (op) {
+       case DMA_CACHE_CLEAN:
                clean_dcache_range(start, end);
                break;
-       case DMA_BIDIRECTIONAL: /* writeback and invalidate */
+       case DMA_CACHE_INVAL:
+               invalidate_dcache_range(start, end);
+               break;
+       case DMA_CACHE_FLUSH:
                flush_dcache_range(start, end);
                break;
        }
@@ -48,16 +45,16 @@ static void __dma_sync(void *vaddr, size_t size, int direction)
 
 #ifdef CONFIG_HIGHMEM
 /*
- * __dma_sync_page() implementation for systems using highmem.
+ * __dma_highmem_op() implementation for systems using highmem.
  * In this case, each page of a buffer must be kmapped/kunmapped
- * in order to have a virtual address for __dma_sync(). This must
+ * in order to have a virtual address for __dma_op(). This must
  * not sleep so kmap_atomic()/kunmap_atomic() are used.
  *
  * Note: yes, it is possible and correct to have a buffer extend
  * beyond the first page.
  */
-static inline void __dma_sync_page_highmem(struct page *page,
-               unsigned long offset, size_t size, int direction)
+static inline void __dma_highmem_op(struct page *page,
+               unsigned long offset, size_t size, enum dma_cache_op op)
 {
        size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
        size_t cur_size = seg_size;
@@ -71,7 +68,7 @@ static inline void __dma_sync_page_highmem(struct page *page,
                start = (unsigned long)kmap_atomic(page + seg_nr) + seg_offset;
 
                /* Sync this buffer segment */
-               __dma_sync((void *)start, seg_size, direction);
+               __dma_op((void *)start, seg_size, op);
                kunmap_atomic((void *)start);
                seg_nr++;
 
@@ -88,32 +85,70 @@ static inline void __dma_sync_page_highmem(struct page *page,
 #endif /* CONFIG_HIGHMEM */
 
 /*
- * __dma_sync_page makes memory consistent. identical to __dma_sync, but
- * takes a struct page instead of a virtual address
+ * __dma_phys_op makes memory consistent. Identical to __dma_op, but
+ * takes a phys_addr_t instead of a virtual address
  */
-static void __dma_sync_page(phys_addr_t paddr, size_t size, int dir)
+static void __dma_phys_op(phys_addr_t paddr, size_t size, enum dma_cache_op op)
 {
        struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
        unsigned offset = paddr & ~PAGE_MASK;
 
 #ifdef CONFIG_HIGHMEM
-       __dma_sync_page_highmem(page, offset, size, dir);
+       __dma_highmem_op(page, offset, size, op);
 #else
        unsigned long start = (unsigned long)page_address(page) + offset;
-       __dma_sync((void *)start, size, dir);
+       __dma_op((void *)start, size, op);
 #endif
 }
 
 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
 {
-       __dma_sync_page(paddr, size, dir);
+       switch (dir) {
+       case DMA_NONE:
+               BUG();
+       case DMA_FROM_DEVICE:
+               /*
+                * invalidate only when cache-line aligned otherwise there is
+                * the potential for discarding uncommitted data from the cache
+                */
+               if ((paddr | size) & (L1_CACHE_BYTES - 1))
+                       __dma_phys_op(paddr, size, DMA_CACHE_FLUSH);
+               else
+                       __dma_phys_op(paddr, size, DMA_CACHE_INVAL);
+               break;
+       case DMA_TO_DEVICE:             /* writeback only */
+               __dma_phys_op(paddr, size, DMA_CACHE_CLEAN);
+               break;
+       case DMA_BIDIRECTIONAL: /* writeback and invalidate */
+               __dma_phys_op(paddr, size, DMA_CACHE_FLUSH);
+               break;
+       }
 }
 
 void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
 {
-       __dma_sync_page(paddr, size, dir);
+       switch (dir) {
+       case DMA_NONE:
+               BUG();
+       case DMA_FROM_DEVICE:
+               /*
+                * invalidate only when cache-line aligned otherwise there is
+                * the potential for discarding uncommitted data from the cache
+                */
+               if ((paddr | size) & (L1_CACHE_BYTES - 1))
+                       __dma_phys_op(paddr, size, DMA_CACHE_FLUSH);
+               else
+                       __dma_phys_op(paddr, size, DMA_CACHE_INVAL);
+               break;
+       case DMA_TO_DEVICE:             /* writeback only */
+               __dma_phys_op(paddr, size, DMA_CACHE_CLEAN);
+               break;
+       case DMA_BIDIRECTIONAL: /* writeback and invalidate */
+               __dma_phys_op(paddr, size, DMA_CACHE_FLUSH);
+               break;
+       }
 }
 
 void arch_dma_prep_coherent(struct page *page, size_t size)
-- 
2.39.2
