On Sat, Dec 27, 2025 at 11:52:43AM +1300, Barry Song wrote:
> From: Barry Song <[email protected]>
> 
> dcache_inval_poc_nosync() does not wait for the data cache invalidation
> to complete. This allows the synchronization to be deferred so that all
> SG entries can be waited on together with a single barrier later.
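> 
> For example, a caller might batch the invalidations along these lines
> (illustrative sketch only, not part of this patch: the helper name is
> made up, and the usual scatterlist and barrier helpers are assumed):
> 
>   #include <linux/scatterlist.h>
>   #include <asm/barrier.h>
>   #include <asm/cacheflush.h>
> 
>   static void sg_inval_poc(struct scatterlist *sgl, int nents)
>   {
>           struct scatterlist *sg;
>           int i;
> 
>           for_each_sg(sgl, sg, nents, i) {
>                   unsigned long start = (unsigned long)sg_virt(sg);
> 
>                   /* issue the invalidation CMOs for this entry, no dsb yet */
>                   dcache_inval_poc_nosync(start, start + sg->length);
>           }
> 
>           /* one dsb sy then waits for all of the invalidations above */
>           dsb(sy);
>   }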
> 
> Cc: Leon Romanovsky <[email protected]>
> Cc: Catalin Marinas <[email protected]>
> Cc: Will Deacon <[email protected]>
> Cc: Marek Szyprowski <[email protected]>
> Cc: Robin Murphy <[email protected]>
> Cc: Ada Couprie Diaz <[email protected]>
> Cc: Ard Biesheuvel <[email protected]>
> Cc: Marc Zyngier <[email protected]>
> Cc: Anshuman Khandual <[email protected]>
> Cc: Ryan Roberts <[email protected]>
> Cc: Suren Baghdasaryan <[email protected]>
> Cc: Tangquan Zheng <[email protected]>
> Signed-off-by: Barry Song <[email protected]>
> ---
>  arch/arm64/include/asm/cacheflush.h |  1 +
>  arch/arm64/mm/cache.S               | 42 +++++++++++++++++++++--------
>  2 files changed, 32 insertions(+), 11 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
> index 9b6d0a62cf3d..382b4ac3734d 100644
> --- a/arch/arm64/include/asm/cacheflush.h
> +++ b/arch/arm64/include/asm/cacheflush.h
> @@ -74,6 +74,7 @@ extern void icache_inval_pou(unsigned long start, unsigned long end);
>  extern void dcache_clean_inval_poc(unsigned long start, unsigned long end);
>  extern void dcache_inval_poc(unsigned long start, unsigned long end);
>  extern void dcache_clean_poc(unsigned long start, unsigned long end);
> +extern void dcache_inval_poc_nosync(unsigned long start, unsigned long end);
>  extern void dcache_clean_poc_nosync(unsigned long start, unsigned long end);
>  extern void dcache_clean_pop(unsigned long start, unsigned long end);
>  extern void dcache_clean_pou(unsigned long start, unsigned long end);
> diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
> index 4a7c7e03785d..99a093d3aecb 100644
> --- a/arch/arm64/mm/cache.S
> +++ b/arch/arm64/mm/cache.S
> @@ -132,17 +132,7 @@ alternative_else_nop_endif
>       ret
>  SYM_FUNC_END(dcache_clean_pou)
>  
> -/*
> - *   dcache_inval_poc(start, end)
> - *
> - *   Ensure that any D-cache lines for the interval [start, end)
> - *   are invalidated. Any partial lines at the ends of the interval are
> - *   also cleaned to PoC to prevent data loss.
> - *
> - *   - start   - kernel start address of region
> - *   - end     - kernel end address of region
> - */
> -SYM_FUNC_START(__pi_dcache_inval_poc)
> +.macro raw_dcache_inval_poc_macro
>       dcache_line_size x2, x3
>       sub     x3, x2, #1
>       tst     x1, x3                          // end cache line aligned?
> @@ -158,11 +148,41 @@ SYM_FUNC_START(__pi_dcache_inval_poc)
>  3:   add     x0, x0, x2
>       cmp     x0, x1
>       b.lo    2b
> +.endm
> +
> +/*
> + *   dcache_inval_poc(start, end)
> + *
> + *   Ensure that any D-cache lines for the interval [start, end)
> + *   are invalidated. Any partial lines at the ends of the interval are
> + *   also cleaned to PoC to prevent data loss.
> + *
> + *   - start   - kernel start address of region
> + *   - end     - kernel end address of region
> + */
> +SYM_FUNC_START(__pi_dcache_inval_poc)
> +     raw_dcache_inval_poc_macro
>       dsb     sy
>       ret
>  SYM_FUNC_END(__pi_dcache_inval_poc)
>  SYM_FUNC_ALIAS(dcache_inval_poc, __pi_dcache_inval_poc)
>  
> +/*
> + *   dcache_inval_poc_nosync(start, end)
> + *
> + *   Issue the D-cache invalidation instructions for the interval
> + *   [start, end) without waiting for completion; the maintenance is not
> + *   guaranteed to have completed until an explicit dsb sy is issued later.
> + *
> + *   - start   - kernel start address of region
> + *   - end     - kernel end address of region
> + */
> +SYM_FUNC_START(__pi_dcache_inval_poc_nosync)
> +     raw_dcache_inval_poc_macro
> +     ret

Sorry, similar naming nit to the other patch. Let's have the macro use
the 'nosync' suffix instead of the 'raw' prefix. You can chuck some
underscores at it if you want to keep the name of this function the same.
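
Something along these lines, purely as an illustration (spell it however
you prefer):

	.macro __dcache_inval_poc_nosync
		// body unchanged from the macro in this patch
	.endm

	SYM_FUNC_START(__pi_dcache_inval_poc)
		__dcache_inval_poc_nosync
		dsb	sy
		ret
	SYM_FUNC_END(__pi_dcache_inval_poc)
	SYM_FUNC_ALIAS(dcache_inval_poc, __pi_dcache_inval_poc)

	SYM_FUNC_START(__pi_dcache_inval_poc_nosync)
		__dcache_inval_poc_nosync
		ret
	SYM_FUNC_END(__pi_dcache_inval_poc_nosync)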

Will
