The should_continue_reclaim() function decides during direct reclaim/compaction
whether shrink_zone() should continue reclaiming, or whether compaction is ready
to proceed in that zone. This relies mainly on the compaction_suitable() check,
but by passing a zero classzone_idx, there can be false positives and reclaim
terminates prematurely. Fix this by passing proper classzone_idx.

Additionally, the function checks whether (2UL << order) pages were reclaimed. This
however overlaps with the same gap used by compaction_suitable(), and since the
number sc->nr_reclaimed is accumulated over all reclaimed zones, it doesn't
make much sense for deciding about a given single zone anyway. So just drop
this code.

Signed-off-by: Vlastimil Babka <vba...@suse.cz>
---
 mm/vmscan.c | 31 +++++++++----------------------
 1 file changed, 9 insertions(+), 22 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 640d2e615c36..391e5d2c4e32 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2309,11 +2309,9 @@ static bool in_reclaim_compaction(struct scan_control *sc)
 static inline bool should_continue_reclaim(struct zone *zone,
                                        unsigned long nr_reclaimed,
                                        unsigned long nr_scanned,
-                                       struct scan_control *sc)
+                                       struct scan_control *sc,
+                                       int classzone_idx)
 {
-       unsigned long pages_for_compaction;
-       unsigned long inactive_lru_pages;
-
        /* If not in reclaim/compaction mode, stop */
        if (!in_reclaim_compaction(sc))
                return false;
@@ -2341,20 +2339,8 @@ static inline bool should_continue_reclaim(struct zone *zone,
                        return false;
        }
 
-       /*
-        * If we have not reclaimed enough pages for compaction and the
-        * inactive lists are large enough, continue reclaiming
-        */
-       pages_for_compaction = compact_gap(sc->order);
-       inactive_lru_pages = zone_page_state(zone, NR_INACTIVE_FILE);
-       if (get_nr_swap_pages() > 0)
-               inactive_lru_pages += zone_page_state(zone, NR_INACTIVE_ANON);
-       if (sc->nr_reclaimed < pages_for_compaction &&
-                       inactive_lru_pages > pages_for_compaction)
-               return true;
-
        /* If compaction would go ahead or the allocation would succeed, stop */
-       switch (compaction_suitable(zone, sc->order, 0, 0)) {
+       switch (compaction_suitable(zone, sc->order, 0, classzone_idx)) {
        case COMPACT_PARTIAL:
        case COMPACT_CONTINUE:
                return false;
@@ -2364,11 +2350,12 @@ static inline bool should_continue_reclaim(struct zone *zone,
 }
 
 static bool shrink_zone(struct zone *zone, struct scan_control *sc,
-                       bool is_classzone)
+                       int classzone_idx)
 {
        struct reclaim_state *reclaim_state = current->reclaim_state;
        unsigned long nr_reclaimed, nr_scanned;
        bool reclaimable = false;
+       bool is_classzone = (classzone_idx == zone_idx(zone));
 
        do {
                struct mem_cgroup *root = sc->target_mem_cgroup;
@@ -2450,7 +2437,7 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc,
                        reclaimable = true;
 
        } while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
-                                        sc->nr_scanned - nr_scanned, sc));
+                        sc->nr_scanned - nr_scanned, sc, classzone_idx));
 
        return reclaimable;
 }
@@ -2580,7 +2567,7 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
                        /* need some check for avoid more shrink_zone() */
                }
 
-               shrink_zone(zone, sc, zone_idx(zone) == classzone_idx);
+               shrink_zone(zone, sc, classzone_idx);
        }
 
        /*
@@ -3076,7 +3063,7 @@ static bool kswapd_shrink_zone(struct zone *zone,
                                                balance_gap, classzone_idx))
                return true;
 
-       shrink_zone(zone, sc, zone_idx(zone) == classzone_idx);
+       shrink_zone(zone, sc, classzone_idx);
 
        clear_bit(ZONE_WRITEBACK, &zone->flags);
 
@@ -3678,7 +3665,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                 * priorities until we have enough memory freed.
                 */
                do {
-                       shrink_zone(zone, &sc, true);
+                       shrink_zone(zone, &sc, zone_idx(zone));
                } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
        }
 
-- 
2.8.3

Reply via email to