All platforms could benefit from a page order check against MAX_PAGE_ORDER
before allocating a CMA area for gigantic hugetlb pages. Let's move this
check from the individual platforms into generic hugetlb code.

Cc: Catalin Marinas <catalin.mari...@arm.com>
Cc: Will Deacon <w...@kernel.org>
Cc: Michael Ellerman <m...@ellerman.id.au>
Cc: Nicholas Piggin <npig...@gmail.com>
Cc: linux-arm-ker...@lists.infradead.org
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux...@kvack.org
Cc: linux-ker...@vger.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khand...@arm.com>
---
This patch applies on v6.8-rc3.
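
For reference, a minimal sketch of what the arm64 caller reduces to once
the check lives in hugetlb_cma_reserve() (assuming the surrounding
v6.8-rc3 code, including the pud_sect_supported() branch, stays as-is):

void __init arm64_hugetlb_cma_reserve(void)
{
        int order;

        if (pud_sect_supported())
                order = PUD_SHIFT - PAGE_SHIFT;
        else
                order = CONT_PMD_SHIFT - PAGE_SHIFT;

        /*
         * No WARN_ON() needed here any more - hugetlb_cma_reserve()
         * now performs the MAX_PAGE_ORDER sanity check itself.
         */
        hugetlb_cma_reserve(order);
}

Keeping the warning in one place means each architecture no longer
repeats (and potentially diverges on) the same assumption.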
 
 arch/arm64/mm/hugetlbpage.c   | 7 -------
 arch/powerpc/mm/hugetlbpage.c | 4 +---
 mm/hugetlb.c                  | 7 +++++++
 3 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 8116ac599f80..6720ec8d50e7 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -45,13 +45,6 @@ void __init arm64_hugetlb_cma_reserve(void)
        else
                order = CONT_PMD_SHIFT - PAGE_SHIFT;
 
-       /*
-        * HugeTLB CMA reservation is required for gigantic
-        * huge pages which could not be allocated via the
-        * page allocator. Just warn if there is any change
-        * breaking this assumption.
-        */
-       WARN_ON(order <= MAX_PAGE_ORDER);
        hugetlb_cma_reserve(order);
 }
 #endif /* CONFIG_CMA */
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 0a540b37aab6..16557d008eef 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -614,8 +614,6 @@ void __init gigantic_hugetlb_cma_reserve(void)
                 */
                order = mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT;
 
-       if (order) {
-               VM_WARN_ON(order <= MAX_PAGE_ORDER);
+       if (order)
                hugetlb_cma_reserve(order);
-       }
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cf9c9b2906ea..345b3524df35 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7699,6 +7699,13 @@ void __init hugetlb_cma_reserve(int order)
        bool node_specific_cma_alloc = false;
        int nid;
 
+       /*
+        * HugeTLB CMA reservation is required for gigantic
+        * huge pages which could not be allocated via the
+        * page allocator. Just warn if there is any change
+        * breaking this assumption.
+        */
+       VM_WARN_ON(order <= MAX_PAGE_ORDER);
        cma_reserve_called = true;
 
        if (!hugetlb_cma_size)
-- 
2.25.1
