In a later patch we will update these functions, which requires moving them
to pgtable-radix.c. Doing the move in a separate patch helps with review.

No functional change in this patch; only code movement.

Signed-off-by: Aneesh Kumar K.V <aneesh.ku...@linux.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/radix.h | 63 +++-------------------
 arch/powerpc/mm/pgtable-radix.c            | 48 +++++++++++++++++
 2 files changed, 54 insertions(+), 57 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/radix.h 
b/arch/powerpc/include/asm/book3s/64/radix.h
index 705193e7192f..ff642441aaf6 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -124,6 +124,12 @@ extern void radix__mark_rodata_ro(void);
 extern void radix__mark_initmem_nx(void);
 #endif
 
+extern unsigned long radix__pte_update(struct mm_struct *mm, unsigned long 
addr,
+                                      pte_t *ptep, unsigned long clr,
+                                      unsigned long set, int huge);
+extern void radix__ptep_set_access_flags(struct mm_struct *mm, pte_t *ptep,
+                                        pte_t entry, unsigned long address);
+
 static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr,
                                               unsigned long set)
 {
@@ -140,35 +146,6 @@ static inline unsigned long __radix_pte_update(pte_t 
*ptep, unsigned long clr,
        return old_pte;
 }
 
-
-static inline unsigned long radix__pte_update(struct mm_struct *mm,
-                                       unsigned long addr,
-                                       pte_t *ptep, unsigned long clr,
-                                       unsigned long set,
-                                       int huge)
-{
-       unsigned long old_pte;
-
-       if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
-
-               unsigned long new_pte;
-
-               old_pte = __radix_pte_update(ptep, ~0ul, 0);
-               /*
-                * new value of pte
-                */
-               new_pte = (old_pte | set) & ~clr;
-               radix__flush_tlb_pte_p9_dd1(old_pte, mm, addr);
-               if (new_pte)
-                       __radix_pte_update(ptep, 0, new_pte);
-       } else
-               old_pte = __radix_pte_update(ptep, clr, set);
-       if (!huge)
-               assert_pte_locked(mm, addr);
-
-       return old_pte;
-}
-
 static inline pte_t radix__ptep_get_and_clear_full(struct mm_struct *mm,
                                                   unsigned long addr,
                                                   pte_t *ptep, int full)
@@ -190,34 +167,6 @@ static inline pte_t radix__ptep_get_and_clear_full(struct 
mm_struct *mm,
        return __pte(old_pte);
 }
 
-/*
- * Set the dirty and/or accessed bits atomically in a linux PTE, this
- * function doesn't need to invalidate tlb.
- */
-static inline void radix__ptep_set_access_flags(struct mm_struct *mm,
-                                               pte_t *ptep, pte_t entry,
-                                               unsigned long address)
-{
-
-       unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
-                                             _PAGE_RW | _PAGE_EXEC);
-
-       if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
-
-               unsigned long old_pte, new_pte;
-
-               old_pte = __radix_pte_update(ptep, ~0, 0);
-               /*
-                * new value of pte
-                */
-               new_pte = old_pte | set;
-               radix__flush_tlb_pte_p9_dd1(old_pte, mm, address);
-               __radix_pte_update(ptep, 0, new_pte);
-       } else
-               __radix_pte_update(ptep, 0, set);
-       asm volatile("ptesync" : : : "memory");
-}
-
 static inline int radix__pte_same(pte_t pte_a, pte_t pte_b)
 {
        return ((pte_raw(pte_a) ^ pte_raw(pte_b)) == 0);
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index ce24d72ea679..68931ca549f7 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -1084,3 +1084,51 @@ int radix__has_transparent_hugepage(void)
        return 0;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+unsigned long radix__pte_update(struct mm_struct *mm, unsigned long addr,
+                               pte_t *ptep, unsigned long clr,
+                               unsigned long set, int huge)
+{
+       unsigned long old_pte;
+
+       if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+
+               unsigned long new_pte;
+
+               old_pte = __radix_pte_update(ptep, ~0ul, 0);
+               /*
+                * new value of pte
+                */
+               new_pte = (old_pte | set) & ~clr;
+               radix__flush_tlb_pte_p9_dd1(old_pte, mm, addr);
+               if (new_pte)
+                       __radix_pte_update(ptep, 0, new_pte);
+       } else
+               old_pte = __radix_pte_update(ptep, clr, set);
+       if (!huge)
+               assert_pte_locked(mm, addr);
+
+       return old_pte;
+}
+
+void radix__ptep_set_access_flags(struct mm_struct *mm,
+                                 pte_t *ptep, pte_t entry,
+                                 unsigned long address)
+{
+       unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
+                                             _PAGE_RW | _PAGE_EXEC);
+
+       if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+               unsigned long old_pte, new_pte;
+
+               old_pte = __radix_pte_update(ptep, ~0, 0);
+               /*
+                * new value of pte
+                */
+               new_pte = old_pte | set;
+               radix__flush_tlb_pte_p9_dd1(old_pte, mm, address);
+               __radix_pte_update(ptep, 0, new_pte);
+       } else
+               __radix_pte_update(ptep, 0, set);
+       asm volatile("ptesync" : : : "memory");
+}
-- 
2.17.0

Reply via email to