Currently the 'hugetlb_cma=' command line argument does not create a CMA
area on ARM64_16K_PAGES and ARM64_64K_PAGES based platforms. Instead, it
just ends up with the following warning message, because
hugetlb_cma_reserve() never gets called for these huge page sizes.

[   64.255669] hugetlb_cma: the option isn't supported by current arch
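
Going by the v5.8 sources, the message comes from hugetlb_cma_check() in
mm/hugetlb.c, which fires when 'hugetlb_cma=' was parsed but no arch ever
called hugetlb_cma_reserve(). Roughly:

	void __init hugetlb_cma_check(void)
	{
		if (!hugetlb_cma_size || cma_reserve_called)
			return;

		pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
	}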

This enables CMA area reservation on ARM64_16K_PAGES and ARM64_64K_PAGES
configs by defining a unified arm64_hugetlb_cma_reserve() that is wrapped
in CONFIG_CMA.
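
For example, on a 64K pages kernel the largest gigantic huge page is the
CONT PMD one, and with the current definitions (where CONT_PMD_SHIFT is
just the number of contiguous bits, i.e 5) its order works out as
CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT = 5 + 29 - 16 = 18, i.e
2^18 * 64K = 16G. Likewise on 16K pages it is 5 + 25 - 14 = 16, i.e
2^16 * 16K = 1G, matching the support matrix in the patch.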

Cc: Catalin Marinas <catalin.mari...@arm.com>
Cc: Will Deacon <w...@kernel.org>
Cc: Mark Rutland <mark.rutl...@arm.com>
Cc: Mike Kravetz <mike.krav...@oracle.com>
Cc: Barry Song <song.bao....@hisilicon.com>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: linux-arm-ker...@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khand...@arm.com>
---
Applies on 5.8-rc2.
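
A quick sanity test on a 16K or 64K pages kernel is to boot with
something like 'hugetlb_cma=8G' on the command line and verify that a
CMA area actually gets reserved (e.g via the hugetlb_cma messages in
dmesg, or CmaTotal in /proc/meminfo) instead of the warning quoted
above.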

 arch/arm64/include/asm/hugetlb.h |  8 ++++++++
 arch/arm64/mm/hugetlbpage.c      | 38 ++++++++++++++++++++++++++++++++++++++
 arch/arm64/mm/init.c             |  4 +---
 3 files changed, 47 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 94ba0c5..8eea0e0 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -17,6 +17,14 @@
 extern bool arch_hugetlb_migration_supported(struct hstate *h);
 #endif
 
+#ifdef CONFIG_CMA
+void arm64_hugetlb_cma_reserve(void);
+#else
+static inline void arm64_hugetlb_cma_reserve(void)
+{
+}
+#endif
+
 static inline void arch_clear_hugepage_flags(struct page *page)
 {
        clear_bit(PG_dcache_clean, &page->flags);
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 0a52ce4..ea7fb48 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -19,6 +19,44 @@
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
 
+/*
+ * HugeTLB Support Matrix
+ *
+ * ---------------------------------------------------
+ * | Page Size | CONT PTE |  PMD  | CONT PMD |  PUD  |
+ * ---------------------------------------------------
+ * |     4K    |   64K    |   2M  |    32M   |   1G  |
+ * |    16K    |    2M    |  32M  |     1G   |       |
+ * |    64K    |    2M    | 512M  |    16G   |       |
+ * ---------------------------------------------------
+ */
+
+/*
+ * Reserve CMA areas for the largest supported gigantic
+ * huge page when requested. Any other smaller gigantic
+ * huge pages could still be served from those areas.
+ */
+#ifdef CONFIG_CMA
+void __init arm64_hugetlb_cma_reserve(void)
+{
+       int order;
+
+#ifdef CONFIG_ARM64_4K_PAGES
+       order = PUD_SHIFT - PAGE_SHIFT;
+#else
+       order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
+#endif
+       /*
+        * HugeTLB CMA reservation is required for gigantic
+        * huge pages which could not be allocated via the
+        * page allocator. Just warn if there is any change
+        * breaking this assumption.
+        */
+       WARN_ON(order <= MAX_ORDER);
+       hugetlb_cma_reserve(order);
+}
+#endif /* CONFIG_CMA */
+
 #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
 bool arch_hugetlb_migration_supported(struct hstate *h)
 {
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 1e93cfc..fabf8b0 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -425,9 +425,7 @@ void __init bootmem_init(void)
         * initialize node_online_map that gets used in hugetlb_cma_reserve()
         * while allocating required CMA size across online nodes.
         */
-#ifdef CONFIG_ARM64_4K_PAGES
-       hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
-#endif
+       arm64_hugetlb_cma_reserve();
 
        /*
         * Sparsemem tries to allocate bootmem in memory_present(), so must be
-- 
2.7.4
