Some architectures want different restrictions here. Add an
architecture-specific override, arch_supports_memmap_on_memory(); the
generic version is used when an architecture does not provide its own.

Both the PMD_SIZE check and the pageblock alignment check are moved into
the generic fallback.
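
As an illustration only (not part of this diff), an architecture that
wants a different policy could define the macro and supply its own
helper in an arch header pulled in before mm/memory_hotplug.c; the
placement and the relaxed policy sketched below are hypothetical:

    #define arch_supports_memmap_on_memory arch_supports_memmap_on_memory
    static inline bool arch_supports_memmap_on_memory(unsigned long size)
    {
            unsigned long nr_vmemmap_pages = size >> PAGE_SHIFT;
            unsigned long vmemmap_size = nr_vmemmap_pages * sizeof(struct page);

            /*
             * Hypothetical policy: only require the remaining (usable)
             * range to be pageblock aligned, without insisting that the
             * vmemmap itself covers full PMDs.
             */
            return IS_ALIGNED(size - vmemmap_size,
                              pageblock_nr_pages << PAGE_SHIFT);
    }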

Signed-off-by: Aneesh Kumar K.V <aneesh.ku...@linux.ibm.com>
---
 mm/memory_hotplug.c | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 1b19462f4e72..07c99b0cc371 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1247,12 +1247,20 @@ static int online_memory_block(struct memory_block *mem, void *arg)
        return device_online(&mem->dev);
 }
 
-static bool mhp_supports_memmap_on_memory(unsigned long size)
+#ifndef arch_supports_memmap_on_memory
+static inline bool arch_supports_memmap_on_memory(unsigned long size)
 {
-       unsigned long nr_vmemmap_pages = size / PAGE_SIZE;
+       unsigned long nr_vmemmap_pages = size >> PAGE_SHIFT;
        unsigned long vmemmap_size = nr_vmemmap_pages * sizeof(struct page);
        unsigned long remaining_size = size - vmemmap_size;
 
+       return IS_ALIGNED(vmemmap_size, PMD_SIZE) &&
+               IS_ALIGNED(remaining_size, (pageblock_nr_pages << PAGE_SHIFT));
+}
+#endif
+
+static bool mhp_supports_memmap_on_memory(unsigned long size)
+{
        /*
         * Besides having arch support and the feature enabled at runtime, we
         * need a few more assumptions to hold true:
@@ -1280,9 +1288,8 @@ static bool mhp_supports_memmap_on_memory(unsigned long size)
         *       populate a single PMD.
         */
        return mhp_memmap_on_memory() &&
-              size == memory_block_size_bytes() &&
-              IS_ALIGNED(vmemmap_size, PMD_SIZE) &&
-              IS_ALIGNED(remaining_size, (pageblock_nr_pages << PAGE_SHIFT));
+               size == memory_block_size_bytes() &&
+               arch_supports_memmap_on_memory(size);
 }
 
 /*
-- 
2.41.0
