Currently, in free_area_init_core(), when initializing a zone's fields, a
rough value is set in zone->managed_pages. That value is calculated as
(zone->present_pages - memmap_pages).

Meanwhile, that value is also added to nr_all_pages and nr_kernel_pages,
which represent all free pages of the system (including HIGHMEM memory,
and low memory only, respectively). Both of them are later used in
alloc_large_system_hash().
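
As a purely illustrative sketch (not the actual alloc_large_system_hash()
code), the default sizing there boils down to deriving an entry count from
one of these counters when the caller does not pass one explicitly;
default_hash_entries() below is a made-up name for this example:

  /*
   * Illustrative only: derive a default hash entry count from the number
   * of usable pages, roughly one entry per 2^scale bytes of memory.
   * default_hash_entries() is a made-up name, not a kernel symbol.
   */
  static unsigned long __init default_hash_entries(unsigned long pages, int scale)
  {
          if (scale > PAGE_SHIFT)
                  return pages >> (scale - PAGE_SHIFT);
          return pages << (PAGE_SHIFT - scale);
  }

  /* e.g. sized against low memory: default_hash_entries(nr_kernel_pages, 17) */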

However, the rough calculation and setting of zone->managed_pages is
meaningless because
  a) memmap pages are allocated in units of a node in sparse_init() or
     alloc_node_mem_map(pgdat); the simple (zone->present_pages -
     memmap_pages) is too rough to make sense for a zone;
  b) the zone->managed_pages set here will be zeroed out and reset with
     the actual value in mem_init() via memblock_free_all(). Before that
     reset, no buddy allocation request is issued.

Here, remove the meaningless and complicated calculation of
(zone->present_pages - memmap_pages) and directly set zone->managed_pages
to zone->present_pages. It will be adjusted in mem_init().

Also remove the assignment of nr_all_pages and nr_kernel_pages in
free_area_init_core(). Instead, call the newly added calc_nr_kernel_pages()
to count up all free but not reserved memory in memblock and assign the
result to nr_all_pages and nr_kernel_pages. The counting excludes
memmap_pages and other data used by the kernel, which is more accurate
and simpler than the old way, and also covers the arch_reserved_kernel_pages()
case required by ppc.
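
For reference, a minimal sketch of what calc_nr_kernel_pages() could look
like (the helper itself is added in an earlier patch of this series, so it
does not appear in the diff below). It walks memblock's free ranges, so
reserved regions are excluded automatically; the HIGHMEM boundary handling
via arch_zone_lowest_possible_pfn[] shown here is an assumption:

  /* Sketch only: count free memblock memory into nr_all_pages and
   * nr_kernel_pages; details may differ from the real helper.
   */
  static void __init calc_nr_kernel_pages(void)
  {
          unsigned long start_pfn, end_pfn;
          phys_addr_t start_addr, end_addr;
          u64 i;
  #ifdef CONFIG_HIGHMEM
          unsigned long high_zone_low = arch_zone_lowest_possible_pfn[ZONE_HIGHMEM];
  #endif

          /* Walk only free (not reserved) memblock ranges. */
          for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
                                  &start_addr, &end_addr, NULL) {
                  start_pfn = PFN_UP(start_addr);
                  end_pfn = PFN_DOWN(end_addr);

                  if (start_pfn >= end_pfn)
                          continue;

                  nr_all_pages += end_pfn - start_pfn;
  #ifdef CONFIG_HIGHMEM
                  /* Only the lowmem part counts toward nr_kernel_pages. */
                  start_pfn = clamp(start_pfn, 0UL, high_zone_low);
                  end_pfn = clamp(end_pfn, 0UL, high_zone_low);
  #endif
                  nr_kernel_pages += end_pfn - start_pfn;
          }
  }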

Signed-off-by: Baoquan He <b...@redhat.com>
---
 mm/mm_init.c | 38 ++++++--------------------------------
 1 file changed, 6 insertions(+), 32 deletions(-)

diff --git a/mm/mm_init.c b/mm/mm_init.c
index c57a7fc97a16..55a2b886b7a6 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1584,41 +1584,14 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
 
        for (j = 0; j < MAX_NR_ZONES; j++) {
                struct zone *zone = pgdat->node_zones + j;
-               unsigned long size, freesize, memmap_pages;
-
-               size = zone->spanned_pages;
-               freesize = zone->present_pages;
-
-               /*
-                * Adjust freesize so that it accounts for how much memory
-                * is used by this zone for memmap. This affects the watermark
-                * and per-cpu initialisations
-                */
-               memmap_pages = calc_memmap_size(size, freesize);
-               if (!is_highmem_idx(j)) {
-                       if (freesize >= memmap_pages) {
-                               freesize -= memmap_pages;
-                               if (memmap_pages)
-                                       pr_debug("  %s zone: %lu pages used for memmap\n",
-                                                zone_names[j], memmap_pages);
-                       } else
-                               pr_warn("  %s zone: %lu memmap pages exceeds freesize %lu\n",
-                                       zone_names[j], memmap_pages, freesize);
-               }
-
-               if (!is_highmem_idx(j))
-                       nr_kernel_pages += freesize;
-               /* Charge for highmem memmap if there are enough kernel pages */
-               else if (nr_kernel_pages > memmap_pages * 2)
-                       nr_kernel_pages -= memmap_pages;
-               nr_all_pages += freesize;
+               unsigned long size = zone->spanned_pages;
 
                /*
-                * Set an approximate value for lowmem here, it will be adjusted
-                * when the bootmem allocator frees pages into the buddy system.
-                * And all highmem pages will be managed by the buddy system.
+                * Set zone->managed_pages to zone->present_pages roughly; it
+                * will be zeroed out and reset when the memblock allocator
+                * frees pages into the buddy system.
                 */
-               zone_init_internals(zone, j, nid, freesize);
+               zone_init_internals(zone, j, nid, zone->present_pages);
 
                if (!size)
                        continue;
@@ -1915,6 +1888,7 @@ void __init free_area_init(unsigned long *max_zone_pfn)
                check_for_memory(pgdat);
        }
 
+       calc_nr_kernel_pages();
        memmap_init();
 
        /* disable hash distribution for systems with a single node */
-- 
2.41.0
