In the same spirit as commit 63f501e07a85 ("powerpc/8xx: Simplify TLB
handling"), simplify flush_tlb_kernel_range() for 8xx.

8xx cannot be SMP and has the 'tlbie' and 'tlbia' instructions, so an
inline version of flush_tlb_kernel_range() is worthwhile for 8xx.
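
The resulting inline helper, as added by the diff below, flushes a
single page with 'tlbie' when the range fits within one page and falls
back to a full 'tlbia' otherwise:

    static inline void flush_tlb_kernel_range(unsigned long start,
                                              unsigned long end)
    {
            start &= PAGE_MASK;

            if (end - start <= PAGE_SIZE)
                    asm volatile ("tlbie %0; sync" : : "r" (start) : "memory");
            else
                    asm volatile ("sync; tlbia; isync" : : : "memory");
    }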

With this patch, the first leg of change_page_attr() is:

          2c:   55 29 00 3c     rlwinm  r9,r9,0,0,30
          30:   91 23 00 00     stw     r9,0(r3)
          34:   7c 00 22 64     tlbie   r4,r0
          38:   7c 00 04 ac     hwsync
          3c:   38 60 00 00     li      r3,0
          40:   4e 80 00 20     blr

Before the patch it was:

          30:   55 29 00 3c     rlwinm  r9,r9,0,0,30
          34:   91 2a 00 00     stw     r9,0(r10)
          38:   94 21 ff f0     stwu    r1,-16(r1)
          3c:   7c 08 02 a6     mflr    r0
          40:   38 83 10 00     addi    r4,r3,4096
          44:   90 01 00 14     stw     r0,20(r1)
          48:   48 00 00 01     bl      48 <change_page_attr+0x48>
                                48: R_PPC_REL24 flush_tlb_kernel_range
          4c:   80 01 00 14     lwz     r0,20(r1)
          50:   38 60 00 00     li      r3,0
          54:   7c 08 03 a6     mtlr    r0
          58:   38 21 00 10     addi    r1,r1,16
          5c:   4e 80 00 20     blr

Signed-off-by: Christophe Leroy <christophe.le...@csgroup.eu>
---
 arch/powerpc/include/asm/nohash/tlbflush.h | 12 +++++++++++-
 arch/powerpc/mm/nohash/tlb.c               |  2 ++
 2 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/nohash/tlbflush.h b/arch/powerpc/include/asm/nohash/tlbflush.h
index c08d25e3e626..698935d4f72d 100644
--- a/arch/powerpc/include/asm/nohash/tlbflush.h
+++ b/arch/powerpc/include/asm/nohash/tlbflush.h
@@ -30,7 +30,6 @@ struct mm_struct;
 
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #ifdef CONFIG_PPC_8xx
 static inline void local_flush_tlb_mm(struct mm_struct *mm)
@@ -45,7 +44,18 @@ static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
        asm volatile ("tlbie %0; sync" : : "r" (vmaddr) : "memory");
 }
+
+static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+       start &= PAGE_MASK;
+
+       if (end - start <= PAGE_SIZE)
+               asm volatile ("tlbie %0; sync" : : "r" (start) : "memory");
+       else
+               asm volatile ("sync; tlbia; isync" : : : "memory");
+}
 #else
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 extern void local_flush_tlb_mm(struct mm_struct *mm);
 extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 
diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c
index fd2c77af5c55..47f81d1c35dc 100644
--- a/arch/powerpc/mm/nohash/tlb.c
+++ b/arch/powerpc/mm/nohash/tlb.c
@@ -358,6 +358,7 @@ void __init early_init_mmu_47x(void)
 /*
  * Flush kernel TLB entries in the given range
  */
+#ifndef CONFIG_PPC_8xx
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 #ifdef CONFIG_SMP
@@ -370,6 +371,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 #endif
 }
 EXPORT_SYMBOL(flush_tlb_kernel_range);
+#endif
 
 /*
  * Currently, for range flushing, we just do a full mm flush. This should
-- 
2.35.1
