If we use a division operation, we can compute a batch count closer to the
ideal value. With this value, we can empty at least one list in each pass,
so the whole operation finishes within MIGRATE_PCPTYPES iterations.

This makes the logic simpler and easier to understand. In addition,
batching the frees into larger groups may also improve cache usage.

Signed-off-by: Joonsoo Kim <iamjoonsoo....@lge.com>

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b212554..2632131 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -633,53 +633,71 @@ static inline int free_pages_check(struct page *page)
 static void free_pcppages_bulk(struct zone *zone, int count,
                                        struct per_cpu_pages *pcp)
 {
-       int migratetype = 0;
-       int batch_free = 0;
+       struct page *page;
+       struct list_head *list;
        int to_free = count;
+       int batch_free;
+       int mt, page_mt;
+       int nr_list;
+       int i;
+       bool all = false;
+
+       if (pcp->count == to_free)
+               all = true;
 
        spin_lock(&zone->lock);
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;
 
-       while (to_free) {
-               struct page *page;
-               struct list_head *list;
+redo:
+       /* Count non-empty lists */
+       nr_list = 0;
+       for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
+               list = &pcp->lists[mt];
+               if (!list_empty(list))
+                       nr_list++;
+       }
 
-               /*
-                * Remove pages from lists in a round-robin fashion. A
-                * batch_free count is maintained that is incremented when an
-                * empty list is encountered.  This is so more pages are freed
-                * off fuller lists instead of spinning excessively around empty
-                * lists
-                */
-               do {
-                       batch_free++;
-                       if (++migratetype == MIGRATE_PCPTYPES)
-                               migratetype = 0;
-                       list = &pcp->lists[migratetype];
-               } while (list_empty(list));
+       /*
+        * If there is only one non-empty list, free them all.
+        * Otherwise, remove pages from lists in a round-robin fashion.
+        * Round batch_free up so at least one page is freed per pass,
+        * guaranteeing forward progress even when to_free < nr_list.
+        */
+       if (all || nr_list == 1)
+               batch_free = to_free;
+       else
+               batch_free = DIV_ROUND_UP(to_free, nr_list);
 
-               /* This is the only non-empty list. Free them all. */
-               if (batch_free == MIGRATE_PCPTYPES)
-                       batch_free = to_free;
+       for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
+               list = &pcp->lists[mt];
 
-               do {
-                       int mt; /* migratetype of the to-be-freed page */
+               /* Re-check to_free so we never free more than 'count' pages */
+               for (i = 0; i < batch_free && to_free; i++) {
+                       if (list_empty(list))
+                               break;
 
+                       to_free--;
                        page = list_entry(list->prev, struct page, lru);
+
                        /* must delete as __free_one_page list manipulates */
                        list_del(&page->lru);
-                       mt = get_freepage_migratetype(page);
+                       page_mt = get_freepage_migratetype(page);
                        /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
-                       __free_one_page(page, zone, 0, mt);
-                       trace_mm_page_pcpu_drain(page, 0, mt);
-                       if (likely(!is_migrate_isolate_page(page))) {
-                               __mod_zone_page_state(zone, NR_FREE_PAGES, 1);
-                               if (is_migrate_cma(mt))
-                                       __mod_zone_page_state(zone,
-                                                       NR_FREE_CMA_PAGES, 1);
-                       }
-               } while (--to_free && --batch_free && !list_empty(list));
+                       __free_one_page(page, zone, 0, page_mt);
+                       trace_mm_page_pcpu_drain(page, 0, page_mt);
+
+                       if (unlikely(is_migrate_isolate_page(page)))
+                               continue;
+
+                       __mod_zone_page_state(zone, NR_FREE_PAGES, 1);
+                       if (is_migrate_cma(page_mt))
+                               __mod_zone_page_state(zone,
+                                               NR_FREE_CMA_PAGES, 1);
+               }
        }
+
+       if (to_free)
+               goto redo;
+
        spin_unlock(&zone->lock);
 }
 
-- 
1.7.9.5

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to