On BOOK3S32, hash_preload() uses neither is_exec nor trap,
so drop those parameters and simplify update_mmu_cache().

Signed-off-by: Christophe Leroy <christophe.le...@c-s.fr>
---
 arch/powerpc/mm/book3s32/mmu.c | 29 +++++++----------------------
 arch/powerpc/mm/mmu_decl.h     |  3 +--
 arch/powerpc/mm/pgtable_32.c   |  2 +-
 3 files changed, 9 insertions(+), 25 deletions(-)
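
For reference, a sketch of how the simplified update_mmu_cache() reads once
this is applied, assembled from the hunk below (the function prologue ahead
of the quoted context is assumed unchanged and only hinted at here):

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
	/* ... early exits ahead of the quoted hunk, unchanged ... */

	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We have to test for regs NULL since init will get here first thing at boot */
	if (!current->thread.regs)
		return;

	/* We also avoid filling the hash if not coming from a fault */
	if (TRAP(current->thread.regs) != 0x300 && TRAP(current->thread.regs) != 0x400)
		return;

	hash_preload(vma->vm_mm, address);
}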

diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index 3e3c4077cdb7..3a62bf99f93f 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -297,8 +297,7 @@ void __init setbat(int index, unsigned long virt, phys_addr_t phys,
 /*
  * Preload a translation in the hash table
  */
-void hash_preload(struct mm_struct *mm, unsigned long ea,
-                 bool is_exec, unsigned long trap)
+void hash_preload(struct mm_struct *mm, unsigned long ea)
 {
        pmd_t *pmd;
 
@@ -324,34 +323,20 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
         * We don't need to worry about _PAGE_PRESENT here because we are
         * called with either mm->page_table_lock held or ptl lock held
         */
-       unsigned long trap;
-       bool is_exec;
 
        /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
        if (!pte_young(*ptep) || address >= TASK_SIZE)
                return;
 
-       /* We try to figure out if we are coming from an instruction
-        * access fault and pass that down to __hash_page so we avoid
-        * double-faulting on execution of fresh text. We have to test
-        * for regs NULL since init will get here first thing at boot
-        *
-        * We also avoid filling the hash if not coming from a fault
-        */
+       /* We have to test for regs NULL since init will get here first thing at boot */
+       if (!current->thread.regs)
+               return;
 
-       trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
-       switch (trap) {
-       case 0x300:
-               is_exec = false;
-               break;
-       case 0x400:
-               is_exec = true;
-               break;
-       default:
+       /* We also avoid filling the hash if not coming from a fault */
+       if (TRAP(current->thread.regs) != 0x300 && TRAP(current->thread.regs) != 0x400)
                return;
-       }
 
-       hash_preload(vma->vm_mm, address, is_exec, trap);
+       hash_preload(vma->vm_mm, address);
 }
 
 /*
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 9f325a7a09cb..adbaf2167214 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -91,8 +91,7 @@ void print_system_hash_info(void);
 
 #ifdef CONFIG_PPC32
 
-void hash_preload(struct mm_struct *mm, unsigned long ea,
-                 bool is_exec, unsigned long trap);
+void hash_preload(struct mm_struct *mm, unsigned long ea);
 
 extern void mapin_ram(void);
 extern void setbat(int index, unsigned long virt, phys_addr_t phys,
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 35cb96cfc258..97f401a06fcc 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -252,7 +252,7 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
                map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
 #ifdef CONFIG_PPC_BOOK3S_32
                if (ktext)
-                       hash_preload(&init_mm, v, false, 0x300);
+                       hash_preload(&init_mm, v);
 #endif
                v += PAGE_SIZE;
                p += PAGE_SIZE;
-- 
2.13.3
