From: Yu Luming <luming...@gmail.com>

ppc always does its own tracking for batched TLB flushes. By trivially
selecting ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH on ppc, the architecture can
re-use the common batching code in rmap, reducing overhead and enabling
optimizations that are not possible without a TLB-flushing context at the
low architecture level.
Signed-off-by: Luming Yu <luming...@shingroup.cn>
---
 arch/powerpc/Kconfig                |  1 +
 arch/powerpc/include/asm/tlbbatch.h | 36 +++++++++++++++++++++++++++++++
 2 files changed, 37 insertions(+)
 create mode 100644 arch/powerpc/include/asm/tlbbatch.h

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index e94e7e4bfd40..e6db84dd014a 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -175,6 +175,7 @@ config PPC
+	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
 	select ARCH_WANT_LD_ORPHAN_WARN
 	select ARCH_WANT_OPTIMIZE_DAX_VMEMMAP if PPC_RADIX_MMU
 	select ARCH_WANTS_MODULES_DATA_IN_VMALLOC if PPC_BOOK3S_32 || PPC_8xx
 	select ARCH_WEAK_RELEASE_ACQUIRE
diff --git a/arch/powerpc/include/asm/tlbbatch.h b/arch/powerpc/include/asm/tlbbatch.h
new file mode 100644
index 000000000000..484628460057
--- /dev/null
+++ b/arch/powerpc/include/asm/tlbbatch.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_TLBBATCH_H
+#define _ASM_POWERPC_TLBBATCH_H
+
+struct mm_struct;
+
+/*
+ * No per-batch state is needed here: powerpc's low-level MMU code keeps
+ * its own tracking for batched TLB invalidations.
+ */
+struct arch_tlbflush_unmap_batch {
+};
+
+static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
+{
+}
+
+static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+					     struct mm_struct *mm,
+					     unsigned long uaddr)
+{
+}
+
+static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
+{
+	/*
+	 * powerpc already batches TLB flushes in its own low-level code,
+	 * and the flush/add_pending hooks above are no-ops, so the generic
+	 * reclaim path must never defer flushes to them.
+	 */
+	return false;
+}
+
+static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
+{
+}
+#endif /* _ASM_POWERPC_TLBBATCH_H */
-- 
2.42.0.windows.2