On Mon 06-06-16 15:48:30, Johannes Weiner wrote:
> Currently, THP are counted as single pages until they are split right
> before being swapped out. However, at that point the VM is already in
> the middle of reclaim, and adjusting the LRU balance then is useless.
> 
> Always account THP by the number of basepages, and remove the fixup
> from the splitting path.
> 
> Signed-off-by: Johannes Weiner <han...@cmpxchg.org>

Acked-by: Michal Hocko <mho...@suse.com>
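
For reference, hpage_nr_pages() already returns the right count for both
the THP and the base-page case, so passing it unconditionally from all
the callers is safe. A rough sketch of its definition (per
include/linux/mm.h at this point, modulo details):

	static inline int hpage_nr_pages(struct page *page)
	{
		if (unlikely(PageTransHuge(page)))
			return HPAGE_PMD_NR;	/* basepages per huge page */
		return 1;			/* regular page */
	}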

> ---
>  mm/swap.c | 18 ++++++++----------
>  1 file changed, 8 insertions(+), 10 deletions(-)
> 
> diff --git a/mm/swap.c b/mm/swap.c
> index d2786a6308dd..c6936507abb5 100644
> --- a/mm/swap.c
> +++ b/mm/swap.c
> @@ -249,13 +249,14 @@ void rotate_reclaimable_page(struct page *page)
>  }
>  
>  static void update_page_reclaim_stat(struct lruvec *lruvec,
> -                                  int file, int rotated)
> +                                  int file, int rotated,
> +                                  unsigned int nr_pages)
>  {
>       struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
>  
> -     reclaim_stat->recent_scanned[file]++;
> +     reclaim_stat->recent_scanned[file] += nr_pages;
>       if (rotated)
> -             reclaim_stat->recent_rotated[file]++;
> +             reclaim_stat->recent_rotated[file] += nr_pages;
>  }
>  
>  static void __activate_page(struct page *page, struct lruvec *lruvec,
> @@ -272,7 +273,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
>               trace_mm_lru_activate(page);
>  
>               __count_vm_event(PGACTIVATE);
> -             update_page_reclaim_stat(lruvec, file, 1);
> +             update_page_reclaim_stat(lruvec, file, 1, hpage_nr_pages(page));
>       }
>  }
>  
> @@ -532,7 +533,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
>  
>       if (active)
>               __count_vm_event(PGDEACTIVATE);
> -     update_page_reclaim_stat(lruvec, file, 0);
> +     update_page_reclaim_stat(lruvec, file, 0, hpage_nr_pages(page));
>  }
>  
>  
> @@ -549,7 +550,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
>               add_page_to_lru_list(page, lruvec, lru);
>  
>               __count_vm_event(PGDEACTIVATE);
> -             update_page_reclaim_stat(lruvec, file, 0);
> +             update_page_reclaim_stat(lruvec, file, 0, hpage_nr_pages(page));
>       }
>  }
>  
> @@ -809,9 +810,6 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
>               list_head = page_tail->lru.prev;
>               list_move_tail(&page_tail->lru, list_head);
>       }
> -
> -     if (!PageUnevictable(page))
> -             update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
>  }
>  #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
>  
> @@ -826,7 +824,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
>  
>       SetPageLRU(page);
>       add_page_to_lru_list(page, lruvec, lru);
> -     update_page_reclaim_stat(lruvec, file, active);
> +     update_page_reclaim_stat(lruvec, file, active, hpage_nr_pages(page));
>       trace_mm_lru_insertion(page, lru);
>  }
>  
> -- 
> 2.8.3
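
To put a number on the effect: with 2MB THP on x86-64, HPAGE_PMD_NR is
512 (2M of 4k basepages), so activating a single THP now bumps
recent_scanned[file] and recent_rotated[file] by 512 rather than 1,
which matches the amount of memory the LRU balancing actually has to
deal with.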

-- 
Michal Hocko
SUSE Labs
