On 2015/1/30 20:34, Joonsoo Kim wrote:
> From: Joonsoo <iamjoonsoo....@lge.com>
> 
> This is a preparation step for using the page allocator's anti-fragmentation
> logic in compaction. This patch just separates the steal decision part from
> the actual steal behaviour part, so there is no functional change.
> 
> Signed-off-by: Joonsoo Kim <iamjoonsoo....@lge.com>
> ---
>  mm/page_alloc.c | 49 ++++++++++++++++++++++++++++++++-----------------
>  1 file changed, 32 insertions(+), 17 deletions(-)
> 
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 8d52ab1..ef74750 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -1122,6 +1122,24 @@ static void change_pageblock_range(struct page *pageblock_page,
>       }
>  }
>  
> +static bool can_steal_freepages(unsigned int order,
> +                             int start_mt, int fallback_mt)
> +{
> +     if (is_migrate_cma(fallback_mt))
> +             return false;
> +
> +     if (order >= pageblock_order)
> +             return true;
> +
> +     if (order >= pageblock_order / 2 ||
> +             start_mt == MIGRATE_RECLAIMABLE ||
> +             start_mt == MIGRATE_UNMOVABLE ||
> +             page_group_by_mobility_disabled)
> +             return true;
> +
> +     return false;
> +}

So IMHO some comments explaining in which cases we can or cannot steal
freepages from another migratetype are necessary here. Actually, we could
just move some of the comments from try_to_steal_freepages() to here.
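
For example, something like the following (just a rough sketch; the comment
wording is adapted from the existing comment block above
try_to_steal_freepages(), so feel free to reword):

static bool can_steal_freepages(unsigned int order,
				int start_mt, int fallback_mt)
{
	/*
	 * Never steal from MIGRATE_CMA pageblocks; CMA pages can only be
	 * used for the allocation itself.
	 */
	if (is_migrate_cma(fallback_mt))
		return false;

	/* We take ownership of the whole pageblock for orders >= pageblock_order. */
	if (order >= pageblock_order)
		return true;

	/*
	 * If we are stealing a relatively large buddy page, it is likely
	 * there will be more free pages in the pageblock, so steal them all.
	 * For reclaimable and unmovable allocations, steal regardless of
	 * page size, since fragmentation caused by those allocations
	 * polluting movable pageblocks is worse than movable allocations
	 * stealing from unmovable and reclaimable pageblocks.
	 */
	if (order >= pageblock_order / 2 ||
		start_mt == MIGRATE_RECLAIMABLE ||
		start_mt == MIGRATE_UNMOVABLE ||
		page_group_by_mobility_disabled)
		return true;

	return false;
}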

Thanks.

> +
>  /*
>   * When we are falling back to another migratetype during allocation, try to
>   * steal extra free pages from the same pageblocks to satisfy further
> @@ -1138,9 +1156,10 @@ static void change_pageblock_range(struct page *pageblock_page,
>   * as well.
>   */
>  static void try_to_steal_freepages(struct zone *zone, struct page *page,
> -                               int start_type, int fallback_type)
> +                               int start_type)
>  {
>       int current_order = page_order(page);
> +     int pages;
>  
>       /* Take ownership for orders >= pageblock_order */
>       if (current_order >= pageblock_order) {
> @@ -1148,19 +1167,12 @@ static void try_to_steal_freepages(struct zone *zone, struct page *page,
>               return;
>       }
>  
> -     if (current_order >= pageblock_order / 2 ||
> -         start_type == MIGRATE_RECLAIMABLE ||
> -         start_type == MIGRATE_UNMOVABLE ||
> -         page_group_by_mobility_disabled) {
> -             int pages;
> +     pages = move_freepages_block(zone, page, start_type);
>  
> -             pages = move_freepages_block(zone, page, start_type);
> -
> -             /* Claim the whole block if over half of it is free */
> -             if (pages >= (1 << (pageblock_order-1)) ||
> -                             page_group_by_mobility_disabled)
> -                     set_pageblock_migratetype(page, start_type);
> -     }
> +     /* Claim the whole block if over half of it is free */
> +     if (pages >= (1 << (pageblock_order-1)) ||
> +                     page_group_by_mobility_disabled)
> +             set_pageblock_migratetype(page, start_type);
>  }
>  
>  /* Remove an element from the buddy allocator from the fallback list */
> @@ -1170,6 +1182,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
>       struct free_area *area;
>       unsigned int current_order;
>       struct page *page;
> +     bool can_steal;
>  
>       /* Find the largest possible block of pages in the other list */
>       for (current_order = MAX_ORDER-1;
> @@ -1192,10 +1205,11 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
>                                       struct page, lru);
>                       area->nr_free--;
>  
> -                     if (!is_migrate_cma(migratetype)) {
> +                     can_steal = can_steal_freepages(current_order,
> +                                     start_migratetype, migratetype);
> +                     if (can_steal) {
>                               try_to_steal_freepages(zone, page,
> -                                                     start_migratetype,
> -                                                     migratetype);
> +                                                     start_migratetype);
>                       } else {
>                               /*
>                                * When borrowing from MIGRATE_CMA, we need to
> @@ -1203,7 +1217,8 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
>                                * itself, and we do not try to steal extra
>                                * free pages.
>                                */
> -                             buddy_type = migratetype;
> +                             if (is_migrate_cma(migratetype))
> +                                     buddy_type = migratetype;
>                       }
>  
>                       /* Remove the page from the freelists */
> 