From: "Mike Rapoport (Microsoft)" <r...@kernel.org>

Allocating the zero pages from memblock is simpler because the memory is
already reserved.

This will also help with pulling out memblock_free_all() to the generic
code and reducing code duplication in arch::mem_init().

Signed-off-by: Mike Rapoport (Microsoft) <r...@kernel.org>
---
 arch/s390/mm/init.c | 14 +++-----------
 1 file changed, 3 insertions(+), 11 deletions(-)

diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index f2298f7a3f21..020aa2f78d01 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -73,8 +73,6 @@ static void __init setup_zero_pages(void)
 {
        unsigned long total_pages = memblock_estimated_nr_free_pages();
        unsigned int order;
-       struct page *page;
-       int i;
 
        /* Latest machines require a mapping granularity of 512KB */
        order = 7;
@@ -83,17 +81,10 @@ static void __init setup_zero_pages(void)
        while (order > 2 && (total_pages >> 10) < (1UL << order))
                order--;
 
-       empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+       empty_zero_page = (unsigned long)memblock_alloc(PAGE_SIZE << order, PAGE_SIZE << order);
        if (!empty_zero_page)
                panic("Out of memory in setup_zero_pages");
 
-       page = virt_to_page((void *) empty_zero_page);
-       split_page(page, order);
-       for (i = 1 << order; i > 0; i--) {
-               mark_page_reserved(page);
-               page++;
-       }
-
        zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
 }
 
@@ -176,9 +167,10 @@ void __init mem_init(void)
        pv_init();
        kfence_split_mapping();
 
+       setup_zero_pages();     /* Setup zeroed pages. */
+
        /* this will put all low memory onto the freelists */
        memblock_free_all();
-       setup_zero_pages();     /* Setup zeroed pages. */
 }
 
 unsigned long memory_block_size_bytes(void)
-- 
2.47.2


Reply via email to