book3s64's ioremap_range() is almost the same as the fallback
ioremap_range(), except that it calls radix__ioremap_range() when radix
is enabled.

radix__ioremap_range() is also very similar to the others, except that
it calls ioremap_page_range() when slab is available.

Let's keep only one version of ioremap_range(), which calls
ioremap_page_range() on all platforms when slab is available.

At the same time, drop the nid parameter, which is unused.
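
For reference, here is a sketch of the consolidated ioremap_range() as
it reads after this patch (reconstructed from the diff below; the
comments are added here for explanation only):

	static int ioremap_range(unsigned long ea, phys_addr_t pa,
				 unsigned long size, pgprot_t prot)
	{
		unsigned long i;

		/* Once slab is up, rely on the generic vmalloc infrastructure. */
		if (slab_is_available()) {
			int err = ioremap_page_range(ea, ea + size, pa, prot);

			if (err)
				unmap_kernel_range(ea, size);
			return err;
		}

		/* Early boot: map page by page, nothing to clean up on failure. */
		for (i = 0; i < size; i += PAGE_SIZE) {
			int err = map_kernel_page(ea + i, pa + i, prot);

			if (WARN_ON_ONCE(err))	/* Should clean up */
				return err;
		}

		return 0;
	}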

Signed-off-by: Christophe Leroy <christophe.le...@c-s.fr>
---
 arch/powerpc/include/asm/book3s/64/radix.h |  3 ---
 arch/powerpc/mm/book3s64/pgtable.c         | 21 ---------------------
 arch/powerpc/mm/book3s64/radix_pgtable.c   | 20 --------------------
 arch/powerpc/mm/ioremap.c                  | 23 +++++++++++++----------
 4 files changed, 13 insertions(+), 54 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index e04a839cb5b9..574eca33f893 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -266,9 +266,6 @@ extern void radix__vmemmap_remove_mapping(unsigned long start,
 extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
                                 pgprot_t flags, unsigned int psz);
 
-extern int radix__ioremap_range(unsigned long ea, phys_addr_t pa,
-                               unsigned long size, pgprot_t prot, int nid);
-
 static inline unsigned long radix__get_tree_size(void)
 {
        unsigned long rts_field;
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 7d0e0d0d22c4..4c8bed856533 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -446,24 +446,3 @@ int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
 
        return true;
 }
-
-int ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot, int nid)
-{
-       unsigned long i;
-
-       if (radix_enabled())
-               return radix__ioremap_range(ea, pa, size, prot, nid);
-
-       for (i = 0; i < size; i += PAGE_SIZE) {
-               int err = map_kernel_page(ea + i, pa + i, prot);
-               if (err) {
-                       if (slab_is_available())
-                               unmap_kernel_range(ea, size);
-                       else
-                               WARN_ON_ONCE(1); /* Should clean up */
-                       return err;
-               }
-       }
-
-       return 0;
-}
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 11303e2fffb1..d39edbb07bd1 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -1218,26 +1218,6 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
        return 1;
 }
 
-int radix__ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size,
-                       pgprot_t prot, int nid)
-{
-       if (likely(slab_is_available())) {
-               int err = ioremap_page_range(ea, ea + size, pa, prot);
-               if (err)
-                       unmap_kernel_range(ea, size);
-               return err;
-       } else {
-               unsigned long i;
-
-               for (i = 0; i < size; i += PAGE_SIZE) {
-                       int err = map_kernel_page(ea + i, pa + i, prot);
-                       if (WARN_ON_ONCE(err)) /* Should clean up */
-                               return err;
-               }
-               return 0;
-       }
-}
-
 int __init arch_ioremap_p4d_supported(void)
 {
        return 0;
diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c
index 537c9148cea1..dc538d7f2467 100644
--- a/arch/powerpc/mm/ioremap.c
+++ b/arch/powerpc/mm/ioremap.c
@@ -76,21 +76,24 @@ void __iomem *ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long f
 }
 EXPORT_SYMBOL(ioremap_prot);
 
-int __weak ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size,
-                        pgprot_t prot, int nid)
+static int ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size,
+                        pgprot_t prot)
 {
        unsigned long i;
 
+       if (slab_is_available()) {
+               int err = ioremap_page_range(ea, ea + size, pa, prot);
+
+               if (err)
+                       unmap_kernel_range(ea, size);
+               return err;
+       }
+
        for (i = 0; i < size; i += PAGE_SIZE) {
                int err = map_kernel_page(ea + i, pa + i, prot);
 
-               if (err) {
-                       if (slab_is_available())
-                               unmap_kernel_range(ea, size);
-                       else
-                               WARN_ON_ONCE(1); /* Should clean up */
+               if (WARN_ON_ONCE(err))  /* Should clean up */
                        return err;
-               }
        }
 
        return 0;
@@ -165,7 +168,7 @@ void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
                ioremap_bot -= size;
                va = ioremap_bot;
        }
-       ret = ioremap_range(va, pa, size, prot, NUMA_NO_NODE);
+       ret = ioremap_range(va, pa, size, prot);
        if (!ret)
                return (void __iomem *)va + (addr & ~PAGE_MASK);
 
@@ -223,7 +226,7 @@ void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_
        WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
        WARN_ON(size & ~PAGE_MASK);
 
-       if (ioremap_range((unsigned long)ea, pa, size, prot, NUMA_NO_NODE))
+       if (ioremap_range((unsigned long)ea, pa, size, prot))
                return NULL;
 
        return (void __iomem *)ea;
-- 
2.13.3
