On 06/20/2014 11:49 PM, Vlastimil Babka wrote:
> isolate_migratepages_range() is the main function of the compaction scanner,
> called either on a single pageblock by isolate_migratepages() during regular
> compaction, or on an arbitrary range by CMA's __alloc_contig_migrate_range().
> It currently performs two pageblock-wide compaction suitability checks, and
> because of the CMA callpath, it tracks if it crossed a pageblock boundary in
> order to repeat those checks.
> 
> However, closer inspection shows that those checks are always true for CMA:
> - isolation_suitable() is true because CMA sets cc->ignore_skip_hint to true
> - migrate_async_suitable() check is skipped because CMA uses sync compaction
> 
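
(For readers less familiar with the CMA path: the CMA caller, alloc_contig_range()
in mm/page_alloc.c, sets up the compact_control it passes down to
__alloc_contig_migrate_range() roughly like this - a sketch from memory, so check
the tree for the exact fields:

	struct compact_control cc = {
		.nr_migratepages = 0,
		.order = -1,
		.zone = page_zone(pfn_to_page(start)),
		.mode = MIGRATE_SYNC,		/* sync compaction ... */
		.ignore_skip_hint = true,	/* ... and skip hints ignored */
	};

so isolation_suitable() always returns true for it, and the MIGRATE_ASYNC-only
migrate_async_suitable() check never triggers.)
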
> We can therefore move the checks to isolate_migratepages(), reducing variables
> and simplifying isolate_migratepages_range(). The update_pageblock_skip()
> function also no longer needs set_unsuitable parameter.
> 
> Furthermore, going back to compact_zone() and compact_finished() when
> pageblock is unsuitable is wasteful - the checks are meant to skip
> pageblocks quickly.
> The patch therefore also introduces a simple loop into isolate_migratepages()
> so that it does not return immediately on pageblock checks, but keeps going
> until isolate_migratepages_range() gets called once. Similarly to
> isolate_freepages(), the function periodically checks if it needs to
> reschedule or abort async compaction.
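
(A small note on the periodic check in the new isolate_migratepages() loop below:
on a typical x86_64 config with 4 KB pages and pageblock_order = 9
(pageblock_nr_pages = 512), !(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
becomes true once every 32 pageblocks, i.e. roughly every 64 MB of scanned
address range - the same rate at which isolate_freepages() already checks.)
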
> 
> Signed-off-by: Vlastimil Babka <vba...@suse.cz>
> Cc: Minchan Kim <minc...@kernel.org>
> Cc: Mel Gorman <mgor...@suse.de>
> Cc: Joonsoo Kim <iamjoonsoo....@lge.com>
> Cc: Michal Nazarewicz <min...@mina86.com>
> Cc: Naoya Horiguchi <n-horigu...@ah.jp.nec.com>
> Cc: Christoph Lameter <c...@linux.com>
> Cc: Rik van Riel <r...@redhat.com>
> Cc: David Rientjes <rient...@google.com>

I think this is a good clean-up that makes the code clearer.

Reviewed-by: Zhang Yanfei <zhangyan...@cn.fujitsu.com>

Only a tiny nit-pick below.

> ---
>  mm/compaction.c | 112 +++++++++++++++++++++++++++++---------------------------
>  1 file changed, 59 insertions(+), 53 deletions(-)
> 
> diff --git a/mm/compaction.c b/mm/compaction.c
> index 3064a7f..ebe30c9 100644
> --- a/mm/compaction.c
> +++ b/mm/compaction.c
> @@ -132,7 +132,7 @@ void reset_isolation_suitable(pg_data_t *pgdat)
>   */
>  static void update_pageblock_skip(struct compact_control *cc,
>                       struct page *page, unsigned long nr_isolated,
> -                     bool set_unsuitable, bool migrate_scanner)
> +                     bool migrate_scanner)
>  {
>       struct zone *zone = cc->zone;
>       unsigned long pfn;
> @@ -146,12 +146,7 @@ static void update_pageblock_skip(struct compact_control *cc,
>       if (nr_isolated)
>               return;
>  
> -     /*
> -      * Only skip pageblocks when all forms of compaction will be known to
> -      * fail in the near future.
> -      */
> -     if (set_unsuitable)
> -             set_pageblock_skip(page);
> +     set_pageblock_skip(page);
>  
>       pfn = page_to_pfn(page);
>  
> @@ -180,7 +175,7 @@ static inline bool isolation_suitable(struct compact_control *cc,
>  
>  static void update_pageblock_skip(struct compact_control *cc,
>                       struct page *page, unsigned long nr_isolated,
> -                     bool set_unsuitable, bool migrate_scanner)
> +                     bool migrate_scanner)
>  {
>  }
>  #endif /* CONFIG_COMPACTION */
> @@ -345,8 +340,7 @@ isolate_fail:
>  
>       /* Update the pageblock-skip if the whole pageblock was scanned */
>       if (blockpfn == end_pfn)
> -             update_pageblock_skip(cc, valid_page, total_isolated, true,
> -                                   false);
> +             update_pageblock_skip(cc, valid_page, total_isolated, false);
>  
>       count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
>       if (total_isolated)
> @@ -474,14 +468,12 @@ unsigned long
>  isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
>               unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
>  {
> -     unsigned long last_pageblock_nr = 0, pageblock_nr;
>       unsigned long nr_scanned = 0, nr_isolated = 0;
>       struct list_head *migratelist = &cc->migratepages;
>       struct lruvec *lruvec;
>       unsigned long flags;
>       bool locked = false;
>       struct page *page = NULL, *valid_page = NULL;
> -     bool set_unsuitable = true;
>       const isolate_mode_t mode = (cc->mode == MIGRATE_ASYNC ?
>                                       ISOLATE_ASYNC_MIGRATE : 0) |
>                                   (unevictable ? ISOLATE_UNEVICTABLE : 0);
> @@ -545,28 +537,6 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
>               if (!valid_page)
>                       valid_page = page;
>  
> -             /* If isolation recently failed, do not retry */
> -             pageblock_nr = low_pfn >> pageblock_order;
> -             if (last_pageblock_nr != pageblock_nr) {
> -                     int mt;
> -
> -                     last_pageblock_nr = pageblock_nr;
> -                     if (!isolation_suitable(cc, page))
> -                             goto next_pageblock;
> -
> -                     /*
> -                      * For async migration, also only scan in MOVABLE
> -                      * blocks. Async migration is optimistic to see if
> -                      * the minimum amount of work satisfies the allocation
> -                      */
> -                     mt = get_pageblock_migratetype(page);
> -                     if (cc->mode == MIGRATE_ASYNC &&
> -                         !migrate_async_suitable(mt)) {
> -                             set_unsuitable = false;
> -                             goto next_pageblock;
> -                     }
> -             }
> -
>               /*
>                * Skip if free. page_order cannot be used without zone->lock
>                * as nothing prevents parallel allocations or buddy merging.
> @@ -668,8 +638,7 @@ next_pageblock:
>        * if the whole pageblock was scanned without isolating any page.
>        */
>       if (low_pfn == end_pfn)
> -             update_pageblock_skip(cc, valid_page, nr_isolated,
> -                                   set_unsuitable, true);
> +             update_pageblock_skip(cc, valid_page, nr_isolated, true);
>  
>       trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
>  
> @@ -840,34 +809,74 @@ typedef enum {
>  } isolate_migrate_t;
>  
>  /*
> - * Isolate all pages that can be migrated from the block pointed to by
> - * the migrate scanner within compact_control.
> + * Isolate all pages that can be migrated from the first suitable block,
> + * starting at the block pointed to by the migrate scanner pfn within
> + * compact_control.
>   */
>  static isolate_migrate_t isolate_migratepages(struct zone *zone,
>                                       struct compact_control *cc)
>  {
>       unsigned long low_pfn, end_pfn;
> +     struct page *page;
>  
> -     /* Do not scan outside zone boundaries */
> -     low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
> +     /* Start at where we last stopped, or beginning of the zone */
> +     low_pfn = cc->migrate_pfn;

This is OK since cc->migrate_pfn has already been restricted to lie inside the
zone. But the comment here may be confusing...
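
For reference, the restriction I mean happens in compact_zone() before
isolate_migratepages() is ever called - roughly like this (quoting from memory,
so treat it as a sketch rather than the exact code):

	/*
	 * start_pfn = zone->zone_start_pfn, end_pfn = zone_end_pfn(zone),
	 * sync = cc->mode != MIGRATE_ASYNC
	 */
	cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
		cc->migrate_pfn = start_pfn;
		zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
		zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
	}

so by the time we get here cc->migrate_pfn is guaranteed to lie inside the zone
and the old max() against zone->zone_start_pfn was indeed redundant. Maybe the
comment could just mention that compact_zone() has already clamped it.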

Thanks.

>  
>       /* Only scan within a pageblock boundary */
>       end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);
>  
> -     /* Do not cross the free scanner or scan within a memory hole */
> -     if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
> -             cc->migrate_pfn = end_pfn;
> -             return ISOLATE_NONE;
> -     }
> +     /*
> +      * Iterate over whole pageblocks until we find the first suitable.
> +      * Do not cross the free scanner.
> +      */
> +     for (; end_pfn <= cc->free_pfn;
> +                     low_pfn = end_pfn, end_pfn += pageblock_nr_pages) {
> +
> +             /*
> +              * This can potentially iterate a massively long zone with
> +              * many pageblocks unsuitable, so periodically check if we
> +              * need to schedule, or even abort async compaction.
> +              */
> +             if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
> +                                             && compact_should_abort(cc))
> +                     break;
>  
> -     /* Perform the isolation */
> -     low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
> -     if (!low_pfn || cc->contended)
> -             return ISOLATE_ABORT;
> +             /* Do not scan within a memory hole */
> +             if (!pfn_valid(low_pfn))
> +                     continue;
> +
> +             page = pfn_to_page(low_pfn);
> +             /* If isolation recently failed, do not retry */
> +             if (!isolation_suitable(cc, page))
> +                     continue;
>  
> +             /*
> +              * For async compaction, also only scan in MOVABLE blocks.
> +              * Async compaction is optimistic to see if the minimum amount
> +              * of work satisfies the allocation.
> +              */
> +             if (cc->mode == MIGRATE_ASYNC &&
> +                 !migrate_async_suitable(get_pageblock_migratetype(page)))
> +                     continue;
> +
> +             /* Perform the isolation */
> +             low_pfn = isolate_migratepages_range(zone, cc, low_pfn,
> +                                                             end_pfn, false);
> +             if (!low_pfn || cc->contended)
> +                     return ISOLATE_ABORT;
> +
> +             /*
> +              * Either we isolated something and proceed with migration. Or
> +              * we failed and compact_zone should decide if we should
> +              * continue or not.
> +              */
> +             break;
> +     }
> +
> +     /* Record where migration scanner will be restarted */
>       cc->migrate_pfn = low_pfn;
>  
> -     return ISOLATE_SUCCESS;
> +     return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
>  }
>  
>  static int compact_finished(struct zone *zone,
> @@ -1040,9 +1049,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
>                       ;
>               }
>  
> -             if (!cc->nr_migratepages)
> -                     continue;
> -
>               err = migrate_pages(&cc->migratepages, compaction_alloc,
>                               compaction_free, (unsigned long)cc, cc->mode,
>                               MR_COMPACTION);
> 


-- 
Thanks.
Zhang Yanfei