Add the following six helper functions to improve the modularity and
readability of the SLB management code:

(1) slb_invalidate_all:         Invalidates the entire SLB (even slot 0) and the ERATs
(2) slb_invalidate:             Invalidates the SLB entries cached in the PACA
(3) mmu_linear_vsid_flags:      VSID flags for the kernel linear mapping
(4) mmu_vmalloc_vsid_flags:     VSID flags for the kernel vmalloc mapping
(5) mmu_vmemmap_vsid_flags:     VSID flags for the kernel vmemmap mapping
(6) mmu_io_vsid_flags:          VSID flags for the kernel I/O mapping
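
To illustrate the intent, here is the before/after pattern at one call
site, taken from the diff below (no behavioral change, only the
open-coded flag composition moving into a helper):

        /* before: VSID flags for the vmalloc region, open-coded at each use */
        vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;

        /* after: the composition lives in one helper per kernel mapping */
        vflags = mmu_vmalloc_vsid_flags();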

Signed-off-by: Anshuman Khandual <khand...@linux.vnet.ibm.com>
---
 arch/powerpc/mm/slb.c | 92 ++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 61 insertions(+), 31 deletions(-)

diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 701a57f..c87d5de 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -96,18 +96,37 @@ static inline void new_shadowed_slbe(unsigned long ea, int ssize,
                     : "memory" );
 }
 
+static inline unsigned long mmu_linear_vsid_flags(void)
+{
+       return SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp;
+}
+
+static inline unsigned long mmu_vmalloc_vsid_flags(void)
+{
+       return SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
+}
+
+static inline unsigned long mmu_io_vsid_flags(void)
+{
+       return SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp;
+}
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static inline unsigned long mmu_vmemmap_vsid_flags(void)
+{
+       return SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmemmap_psize].sllp;
+}
+#endif
+
 static void __slb_flush_and_rebolt(void)
 {
        /* If you change this make sure you change SLB_NUM_BOLTED
         * and PR KVM appropriately too. */
-       unsigned long linear_llp, vmalloc_llp, lflags, vflags;
+       unsigned long lflags, vflags;
        unsigned long ksp_esid_data, ksp_vsid_data;
 
-       linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
-       vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
-       lflags = SLB_VSID_KERNEL | linear_llp;
-       vflags = SLB_VSID_KERNEL | vmalloc_llp;
-
+       lflags = mmu_linear_vsid_flags();
+       vflags = mmu_vmalloc_vsid_flags();
        ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, KSTACK_SLOT);
        if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
                ksp_esid_data &= ~SLB_ESID_V;
@@ -155,7 +174,7 @@ void slb_vmalloc_update(void)
 {
        unsigned long vflags;
 
-       vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
+       vflags = mmu_vmalloc_vsid_flags();
        slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_SLOT);
        slb_flush_and_rebolt();
 }
@@ -189,26 +208,15 @@ static inline int esids_match(unsigned long addr1, unsigned long addr2)
        return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2));
 }
 
-/* Flush all user entries from the segment table of the current processor. */
-void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
+static void slb_invalidate(void)
 {
-       unsigned long offset;
        unsigned long slbie_data = 0;
-       unsigned long pc = KSTK_EIP(tsk);
-       unsigned long stack = KSTK_ESP(tsk);
-       unsigned long exec_base;
+       unsigned long offset;
+       int i;
 
-       /*
-        * We need interrupts hard-disabled here, not just soft-disabled,
-        * so that a PMU interrupt can't occur, which might try to access
-        * user memory (to get a stack trace) and possible cause an SLB miss
-        * which would update the slb_cache/slb_cache_ptr fields in the PACA.
-        */
-       hard_irq_disable();
        offset = get_paca()->slb_cache_ptr;
        if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
            offset <= SLB_CACHE_ENTRIES) {
-               int i;
                asm volatile("isync" : : : "memory");
                for (i = 0; i < offset; i++) {
                        slbie_data = (unsigned long)get_paca()->slb_cache[i]
@@ -226,6 +234,23 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
        /* Workaround POWER5 < DD2.1 issue */
        if (offset == 1 || offset > SLB_CACHE_ENTRIES)
                asm volatile("slbie %0" : : "r" (slbie_data));
+}
+
+/* Flush all user entries from the segment table of the current processor. */
+void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
+{
+       unsigned long pc = KSTK_EIP(tsk);
+       unsigned long stack = KSTK_ESP(tsk);
+       unsigned long exec_base;
+
+       /*
+        * We need interrupts hard-disabled here, not just soft-disabled,
+        * so that a PMU interrupt can't occur, which might try to access
+        * user memory (to get a stack trace) and possibly cause an SLB miss
+        * which would update the slb_cache/slb_cache_ptr fields in the PACA.
+        */
+       hard_irq_disable();
+       slb_invalidate();
 
        get_paca()->slb_cache_ptr = 0;
        get_paca()->context = mm->context;
@@ -258,6 +283,14 @@ static inline void patch_slb_encoding(unsigned int *insn_addr,
        patch_instruction(insn_addr, insn);
 }
 
+/* Invalidate the entire SLB (even slot 0) & all the ERATS */
+static inline void slb_invalidate_all(void)
+{
+       asm volatile("isync":::"memory");
+       asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
+       asm volatile("isync; slbia; isync":::"memory");
+}
+
 extern u32 slb_miss_kernel_load_linear[];
 extern u32 slb_miss_kernel_load_io[];
 extern u32 slb_compare_rr_to_size[];
@@ -285,16 +318,16 @@ void slb_initialize(void)
        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        io_llp = mmu_psize_defs[mmu_io_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
-       get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
+       get_paca()->vmalloc_sllp = mmu_vmalloc_vsid_flags();
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
        vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
 #endif
        if (!slb_encoding_inited) {
                slb_encoding_inited = 1;
                patch_slb_encoding(slb_miss_kernel_load_linear,
-                                  SLB_VSID_KERNEL | linear_llp);
+                                  mmu_linear_vsid_flags());
                patch_slb_encoding(slb_miss_kernel_load_io,
-                                  SLB_VSID_KERNEL | io_llp);
+                                  mmu_io_vsid_flags());
                patch_slb_encoding(slb_compare_rr_to_size,
                                   mmu_slb_size);
 
@@ -303,20 +336,17 @@ void slb_initialize(void)
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
                patch_slb_encoding(slb_miss_kernel_load_vmemmap,
-                                  SLB_VSID_KERNEL | vmemmap_llp);
+                                  mmu_vmemmap_vsid_flags());
                pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
 #endif
        }
 
        get_paca()->stab_rr = SLB_NUM_BOLTED;
 
-       lflags = SLB_VSID_KERNEL | linear_llp;
-       vflags = SLB_VSID_KERNEL | vmalloc_llp;
+       lflags = mmu_linear_vsid_flags();
+       vflags = mmu_vmalloc_vsid_flags();
 
-       /* Invalidate the entire SLB (even entry 0) & all the ERATS */
-       asm volatile("isync":::"memory");
-       asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
-       asm volatile("isync; slbia; isync":::"memory");
+       slb_invalidate_all();
        new_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_SLOT);
        new_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_SLOT);
 
-- 
2.1.0
