On Tue, 2019-08-13 at 17:47 +0200, Christoph Hellwig wrote:
> The RISC-V ISA only supports flushing the instruction cache for the
> local CPU core.  For normal S-mode Linux remote flushing is offloaded
> to machine mode using ecalls, but for M-mode Linux we'll have to do it
> ourselves.  Use the same implementation as all the existing open source
> SBI implementations by just doing an IPI to all remote cores to execute
> the fence.i instruction on every live core.
> 
> Signed-off-by: Christoph Hellwig <h...@lst.de>
> ---
>  arch/riscv/mm/cacheflush.c | 31 +++++++++++++++++++++++++++----
>  1 file changed, 27 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
> index 9ebcff8ba263..10875ea1065e 100644
> --- a/arch/riscv/mm/cacheflush.c
> +++ b/arch/riscv/mm/cacheflush.c
> @@ -10,10 +10,35 @@
>  
>  #include <asm/sbi.h>
>  
> +#ifdef CONFIG_M_MODE
> +static void ipi_remote_fence_i(void *info)
> +{
> +     return local_flush_icache_all();
> +}
> +
> +void flush_icache_all(void)
> +{
> +     on_each_cpu(ipi_remote_fence_i, NULL, 1);
> +}
> +
> +static void flush_icache_cpumask(const cpumask_t *mask)
> +{
> +     on_each_cpu_mask(mask, ipi_remote_fence_i, NULL, 1);
> +}
> +#else /* CONFIG_M_MODE */
>  void flush_icache_all(void)
>  {
>       sbi_remote_fence_i(NULL);
>  }
> +static void flush_icache_cpumask(const cpumask_t *mask)
> +{
> +     cpumask_t hmask;
> +
> +     cpumask_clear(&hmask);
> +     riscv_cpuid_to_hartid_mask(mask, &hmask);
> +     sbi_remote_fence_i(hmask.bits);
> +}
> +#endif /* CONFIG_M_MODE */
>  
>  /*
>   * Performs an icache flush for the given MM context.  RISC-V has no direct
> @@ -28,7 +53,7 @@ void flush_icache_all(void)
>  void flush_icache_mm(struct mm_struct *mm, bool local)
>  {
>       unsigned int cpu;
> -     cpumask_t others, hmask, *mask;
> +     cpumask_t others, *mask;
>  
>       preempt_disable();
>  
> @@ -47,9 +72,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
>       cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
>       local |= cpumask_empty(&others);
>       if (mm != current->active_mm || !local) {
> -             cpumask_clear(&hmask);
> -             riscv_cpuid_to_hartid_mask(&others, &hmask);
> -             sbi_remote_fence_i(hmask.bits);
> +             flush_icache_cpumask(&others);
>       } else {
>               /*
>                * It's assumed that at least one strongly ordered operation is

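For anyone reading along: the IPI callback here bottoms out in
local_flush_icache_all(), which on RISC-V is just a fence.i barrier.
A minimal sketch of that helper for illustration (an approximation,
not necessarily the exact definition in asm/cacheflush.h):

	/*
	 * Sketch of the per-hart icache flush the IPI callback invokes.
	 * The real helper lives in arch/riscv/include/asm/cacheflush.h;
	 * this is only an illustrative approximation.
	 */
	static inline void local_flush_icache_all(void)
	{
		asm volatile ("fence.i" ::: "memory");
	}

So the M-mode path simply runs that barrier on every live core via IPI,
matching what the SBI implementations do on behalf of S-mode.
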
Reviewed-by: Atish Patra <atish.pa...@wdc.com>

-- 
Regards,
Atish
