Don't make update_mmu_cache() a wrapper around __update_tlb(): call it
directly, and use the ptep (or pmdp) provided by the caller, instead of
re-calling pte_offset_map() - which would raise a question of whether a
pte_unmap() is needed to balance it.

Check whether the "ptep" provided by the caller is actually the pmdp,
instead of testing pmd_huge(); or should pmd_huge() be tested too, with a
warning if the two disagree?  This is "hazardous" territory: needs review
and testing.

Signed-off-by: Hugh Dickins <hu...@google.com>
---
 arch/mips/include/asm/pgtable.h | 15 +++------------
 arch/mips/mm/tlb-r3k.c          |  5 +++--
 arch/mips/mm/tlb-r4k.c          |  9 +++------
 3 files changed, 9 insertions(+), 20 deletions(-)

diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 574fa14ac8b2..9175dfab08d5 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -565,15 +565,8 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
 }
 #endif
 
-extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
-       pte_t pte);
-
-static inline void update_mmu_cache(struct vm_area_struct *vma,
-       unsigned long address, pte_t *ptep)
-{
-       pte_t pte = *ptep;
-       __update_tlb(vma, address, pte);
-}
+extern void update_mmu_cache(struct vm_area_struct *vma,
+       unsigned long address, pte_t *ptep);
 
 #define        __HAVE_ARCH_UPDATE_MMU_TLB
 #define update_mmu_tlb update_mmu_cache
@@ -581,9 +574,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
        unsigned long address, pmd_t *pmdp)
 {
-       pte_t pte = *(pte_t *)pmdp;
-
-       __update_tlb(vma, address, pte);
+       update_mmu_cache(vma, address, (pte_t *)pmdp);
 }
 
 /*
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index 53dfa2b9316b..e5722cd8dd6d 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -176,7 +176,8 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
        }
 }
 
-void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+void update_mmu_cache(struct vm_area_struct *vma,
+                     unsigned long address, pte_t *ptep)
 {
        unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
        unsigned long flags;
@@ -203,7 +204,7 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
        BARRIER;
        tlb_probe();
        idx = read_c0_index();
-       write_c0_entrylo0(pte_val(pte));
+       write_c0_entrylo0(pte_val(*ptep));
        write_c0_entryhi(address | pid);
        if (idx < 0) {                                  /* BARRIER */
                tlb_write_random();
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 1b939abbe4ca..c96725d17cab 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -290,14 +290,14 @@ void local_flush_tlb_one(unsigned long page)
  * updates the TLB with the new pte(s), and another which also checks
  * for the R4k "end of page" hardware bug and does the needy.
  */
-void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
+void update_mmu_cache(struct vm_area_struct *vma,
+                     unsigned long address, pte_t *ptep)
 {
        unsigned long flags;
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
-       pte_t *ptep;
        int idx, pid;
 
        /*
@@ -326,10 +326,9 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
        idx = read_c0_index();
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        /* this could be a huge page  */
-       if (pmd_huge(*pmdp)) {
+       if (ptep == (pte_t *)pmdp) {
                unsigned long lo;
                write_c0_pagemask(PM_HUGE_MASK);
-               ptep = (pte_t *)pmdp;
                lo = pte_to_entrylo(pte_val(*ptep));
                write_c0_entrylo0(lo);
                write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));
@@ -344,8 +343,6 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
        } else
 #endif
        {
-               ptep = pte_offset_map(pmdp, address);
-
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 #ifdef CONFIG_XPA
                write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
-- 
2.35.3

Reply via email to