This is an automated email from the ASF dual-hosted git repository.

gnutt pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-nuttx.git

commit a532b0b53aa5e5ad6527d6786b50ce39d11000b3
Author: Jukka Laitinen <[email protected]>
AuthorDate: Tue Oct 1 15:05:41 2019 +0300

    arch/arm/src/stm32h7/stm32_dma.c: Optimization for stm32_sdma_capable
    
    It should not be an error to clean cache beyond the dma source buffer
    boundaries. It would just prematurely push some unrelated data from
    cache to memory.
    
    The only case where it could corrupt memory is when a DMA
    destination buffer overlaps the same cache line as the source
    buffer. But this can't happen, because a destination buffer must
    always be cache-line aligned when using write-back cache.
    
    This patch enables performing a DMA TX-only transfer from an
    unaligned source buffer when using write-back cache.
    
    Signed-off-by: Jukka Laitinen <[email protected]>
---
 arch/arm/src/stm32h7/stm32_dma.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/arch/arm/src/stm32h7/stm32_dma.c b/arch/arm/src/stm32h7/stm32_dma.c
index 96ce049..bfa4f0d 100644
--- a/arch/arm/src/stm32h7/stm32_dma.c
+++ b/arch/arm/src/stm32h7/stm32_dma.c
@@ -1523,19 +1523,21 @@ static bool stm32_sdma_capable(FAR stm32_dmacfg_t *cfg)
 
 #  if defined(CONFIG_ARMV7M_DCACHE) && \
      !defined(CONFIG_ARMV7M_DCACHE_WRITETHROUGH)
-  /* buffer alignment is required for DMA transfers with dcache in buffered
-   * mode (not write-through) because a) arch_invalidate_dcache could lose
-   * buffered writes and b) arch_flush_dcache could corrupt adjacent memory if
-   * the maddr and the mend+1, the next next address are not on
-   * ARMV7M_DCACHE_LINESIZE boundaries.
+  /* buffer alignment is required for RX DMA transfers with dcache in
+   * buffered mode (not write-through) because arch_invalidate_dcache could
+   * lose buffered writes
    */
 
-  if ((cfg->maddr & (ARMV7M_DCACHE_LINESIZE - 1)) != 0 ||
-      ((mend + 1) & (ARMV7M_DCACHE_LINESIZE - 1)) != 0)
+  if ((ccr & DMA_SCR_DIR_MASK) == DMA_SCR_DIR_P2M ||
+      (ccr & DMA_SCR_DIR_MASK) == DMA_SCR_DIR_M2M)
     {
-      dmainfo("stm32_dmacapable: dcache unaligned maddr:0x%08x mend:0x%08x\n",
-              cfg->maddr, mend);
-      return false;
+      if ((cfg->maddr & (ARMV7M_DCACHE_LINESIZE - 1)) != 0 ||
+          ((mend + 1) & (ARMV7M_DCACHE_LINESIZE - 1)) != 0)
+        {
+          dmainfo("stm32_dmacapable: dcache unaligned "
+                  "maddr:0x%08x mend:0x%08x\n", cfg->maddr, mend);
+          return false;
+        }
     }
 #  endif
 

Reply via email to