From: Michal Hocko <mho...@suse.com>

This reverts 91dcade47a3d0e7c31464ef05f56c08e92a0e9c2.
inactive_reclaimable_pages shouldn't be needed anymore, now that
get_scan_count is aware of the eligible zones ("mm, vmscan: consider
eligible zones in get_scan_count").

Signed-off-by: Michal Hocko <mho...@suse.com>
---
 mm/vmscan.c | 27 ---------------------------
 1 file changed, 27 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index a88e222784ea..486ba6d7dc4c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1700,30 +1700,6 @@ static int current_may_throttle(void)
                bdi_write_congested(current->backing_dev_info);
 }
 
-static bool inactive_reclaimable_pages(struct lruvec *lruvec,
-                               struct scan_control *sc, enum lru_list lru)
-{
-       int zid;
-       struct zone *zone;
-       int file = is_file_lru(lru);
-       struct pglist_data *pgdat = lruvec_pgdat(lruvec);
-
-       if (!global_reclaim(sc))
-               return true;
-
-       for (zid = sc->reclaim_idx; zid >= 0; zid--) {
-               zone = &pgdat->node_zones[zid];
-               if (!managed_zone(zone))
-                       continue;
-
-               if (zone_page_state_snapshot(zone, NR_ZONE_LRU_BASE +
-                               LRU_FILE * file) >= SWAP_CLUSTER_MAX)
-                       return true;
-       }
-
-       return false;
-}
-
 /*
  * shrink_inactive_list() is a helper for shrink_node().  It returns the number
  * of reclaimed pages
@@ -1742,9 +1718,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
        struct pglist_data *pgdat = lruvec_pgdat(lruvec);
        struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 
-       if (!inactive_reclaimable_pages(lruvec, sc, lru))
-               return 0;
-
        while (unlikely(too_many_isolated(pgdat, file, sc))) {
                congestion_wait(BLK_RW_ASYNC, HZ/10);
 
-- 
2.11.0

Reply via email to