On Thu, Jul 11, 2013 at 7:03 PM, Robin Holt <h...@sgi.com> wrote:
> Currently, memmap_init_zone() has all the smarts for initializing a
> single page.  When we convert to initializing pages in 2MiB chunks,
> we will need to do the equivalent work from two separate places,
> so we are breaking out a helper function.
>
> Signed-off-by: Robin Holt <h...@sgi.com>
> Signed-off-by: Nate Zimmer <nzim...@sgi.com>
> To: "H. Peter Anvin" <h...@zytor.com>
> To: Ingo Molnar <mi...@kernel.org>
> Cc: Linux Kernel <linux-kernel@vger.kernel.org>
> Cc: Linux MM <linux...@kvack.org>
> Cc: Rob Landley <r...@landley.net>
> Cc: Mike Travis <tra...@sgi.com>
> Cc: Daniel J Blueman <dan...@numascale-asia.com>
> Cc: Andrew Morton <a...@linux-foundation.org>
> Cc: Greg KH <gre...@linuxfoundation.org>
> Cc: Yinghai Lu <ying...@kernel.org>
> Cc: Mel Gorman <mgor...@suse.de>
> ---
>  mm/mm_init.c    |  2 +-
>  mm/page_alloc.c | 75 +++++++++++++++++++++++++++++++++------------------------
>  2 files changed, 45 insertions(+), 32 deletions(-)
>
> diff --git a/mm/mm_init.c b/mm/mm_init.c
> index c280a02..be8a539 100644
> --- a/mm/mm_init.c
> +++ b/mm/mm_init.c
> @@ -128,7 +128,7 @@ void __init mminit_verify_pageflags_layout(void)
>         BUG_ON(or_mask != add_mask);
>  }
>
> -void __meminit mminit_verify_page_links(struct page *page, enum zone_type zone,
> +void mminit_verify_page_links(struct page *page, enum zone_type zone,
>                         unsigned long nid, unsigned long pfn)
>  {
>         BUG_ON(page_to_nid(page) != nid);
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index c3edb62..635b131 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -697,6 +697,49 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
>         spin_unlock(&zone->lock);
>  }
>
> +static void __init_single_page(struct page *page, unsigned long zone, int nid, int reserved)
> +{
> +       unsigned long pfn = page_to_pfn(page);
> +       struct zone *z = &NODE_DATA(nid)->node_zones[zone];
> +
> +       set_page_links(page, zone, nid, pfn);
> +       mminit_verify_page_links(page, zone, nid, pfn);
> +       init_page_count(page);
> +       page_mapcount_reset(page);
> +       page_nid_reset_last(page);
> +       if (reserved) {
> +               SetPageReserved(page);
> +       } else {
> +               ClearPageReserved(page);
> +               set_page_count(page, 0);
> +       }
> +       /*
> +        * Mark the block movable so that blocks are reserved for
> +        * movable at startup. This will force kernel allocations
> +        * to reserve their blocks rather than leaking throughout
> +        * the address space during boot when many long-lived
> +        * kernel allocations are made. Later some blocks near
> +        * the start are marked MIGRATE_RESERVE by
> +        * setup_zone_migrate_reserve()
> +        *
> +        * bitmap is created for zone's valid pfn range. but memmap
> +        * can be created for invalid pages (for alignment)
> +        * check here not to call set_pageblock_migratetype() against
> +        * pfn out of zone.
> +        */
> +       if ((z->zone_start_pfn <= pfn)
> +           && (pfn < zone_end_pfn(z))
> +           && !(pfn & (pageblock_nr_pages - 1)))
> +               set_pageblock_migratetype(page, MIGRATE_MOVABLE);
> +
> +       INIT_LIST_HEAD(&page->lru);
> +#ifdef WANT_PAGE_VIRTUAL
> +       /* The shift won't overflow because ZONE_NORMAL is below 4G. */
> +       if (!is_highmem_idx(zone))
> +               set_page_address(page, __va(pfn << PAGE_SHIFT));
> +#endif
> +}
> +
>  static bool free_pages_prepare(struct page *page, unsigned int order)
>  {
>         int i;
> @@ -3934,37 +3977,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
>                                 continue;
>                 }
>                 page = pfn_to_page(pfn);
> -               set_page_links(page, zone, nid, pfn);
> -               mminit_verify_page_links(page, zone, nid, pfn);
> -               init_page_count(page);
> -               page_mapcount_reset(page);
> -               page_nid_reset_last(page);
> -               SetPageReserved(page);
> -               /*
> -                * Mark the block movable so that blocks are reserved for
> -                * movable at startup. This will force kernel allocations
> -                * to reserve their blocks rather than leaking throughout
> -                * the address space during boot when many long-lived
> -                * kernel allocations are made. Later some blocks near
> -                * the start are marked MIGRATE_RESERVE by
> -                * setup_zone_migrate_reserve()
> -                *
> -                * bitmap is created for zone's valid pfn range. but memmap
> -                * can be created for invalid pages (for alignment)
> -                * check here not to call set_pageblock_migratetype() against
> -                * pfn out of zone.
> -                */
> -               if ((z->zone_start_pfn <= pfn)
> -                   && (pfn < zone_end_pfn(z))
> -                   && !(pfn & (pageblock_nr_pages - 1)))
> -                       set_pageblock_migratetype(page, MIGRATE_MOVABLE);
> -
> -               INIT_LIST_HEAD(&page->lru);
> -#ifdef WANT_PAGE_VIRTUAL
> -               /* The shift won't overflow because ZONE_NORMAL is below 4G. */
> -               if (!is_highmem_idx(zone))
> -                       set_page_address(page, __va(pfn << PAGE_SHIFT));
> -#endif
> +               __init_single_page(page, zone, nid, 1);
Can you move page = pfn_to_page(pfn) into __init_single_page and pass pfn
directly?
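
That would also drop the page_to_pfn() call at the top of the helper, since
memmap_init_zone() already has the pfn in hand when it looks up the page.
An untested sketch of what I mean (only the first lines change; the body of
your helper stays as in your patch):

static void __init_single_page(unsigned long pfn, unsigned long zone,
			       int nid, int reserved)
{
	/* Look the page up here instead of at every call site. */
	struct page *page = pfn_to_page(pfn);
	struct zone *z = &NODE_DATA(nid)->node_zones[zone];

	set_page_links(page, zone, nid, pfn);
	mminit_verify_page_links(page, zone, nid, pfn);
	/* ... rest of the helper unchanged ... */
}

and the call in memmap_init_zone() becomes:

	__init_single_page(pfn, zone, nid, 1);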

Yinghai