In order to influence page allocation decisions (i.e., to make page-allocation
region-aware), we need to be able to distinguish pageblocks belonging to
different zone memory regions within the zones' freelists.

So, within every freelist in a zone, provide pointers to describe the
boundaries of zone memory regions and counters to track the number of free
pageblocks within each region.

Also, fixup the references to the freelist's list_head inside struct free_area.

Signed-off-by: Srivatsa S. Bhat <[email protected]>
---

 include/linux/mmzone.h |   17 ++++++++++++++++-
 mm/compaction.c        |    8 ++++----
 mm/page_alloc.c        |   21 +++++++++++----------
 mm/vmstat.c            |    2 +-
 4 files changed, 32 insertions(+), 16 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 3982354..aba4d68 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -82,8 +82,23 @@ static inline int get_pageblock_migratetype(struct page *page)
 
 #define MAX_NR_REGIONS 256
 
+struct mem_region_list {
+       struct list_head        *page_block;
+       unsigned long           nr_free;
+};
+
+struct free_list {
+       struct list_head        list;
+
+       /*
+        * Demarcates pageblocks belonging to different regions within
+        * this freelist.
+        */
+       struct mem_region_list  mr_list[MAX_NR_REGIONS];
+};
+
 struct free_area {
-       struct list_head        free_list[MIGRATE_TYPES];
+       struct free_list        free_list[MIGRATE_TYPES];
        unsigned long           nr_free;
 };
 
diff --git a/mm/compaction.c b/mm/compaction.c
index 9eef558..95f5c92 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -247,14 +247,14 @@ static void compact_capture_page(struct compact_control *cc)
                        struct page *page;
                        struct free_area *area;
                        area = &(cc->zone->free_area[order]);
-                       if (list_empty(&area->free_list[mtype]))
+                       if (list_empty(&area->free_list[mtype].list))
                                continue;
 
                        /* Take the lock and attempt capture of the page */
                        if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
                                return;
-                       if (!list_empty(&area->free_list[mtype])) {
-                               page = list_entry(area->free_list[mtype].next,
+                       if (!list_empty(&area->free_list[mtype].list)) {
+                               page = list_entry(area->free_list[mtype].list.next,
                                                        struct page, lru);
                                if (capture_free_page(page, cc->order, mtype)) {
                                        spin_unlock_irqrestore(&cc->zone->lock,
@@ -866,7 +866,7 @@ static int compact_finished(struct zone *zone,
                for (order = cc->order; order < MAX_ORDER; order++) {
                        struct free_area *area = &zone->free_area[cc->order];
                        /* Job done if page is free of the right migratetype */
-                       if (!list_empty(&area->free_list[cc->migratetype]))
+                       if (!list_empty(&area->free_list[cc->migratetype].list))
                                return COMPACT_PARTIAL;
 
                        /* Job done if allocation would set block type */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7fd89cd..62d0a9a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -588,12 +588,13 @@ static inline void __free_one_page(struct page *page,
                higher_buddy = higher_page + (buddy_idx - combined_idx);
                if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
                        list_add_tail(&page->lru,
-                               &zone->free_area[order].free_list[migratetype]);
+                               &zone->free_area[order].free_list[migratetype].list);
                        goto out;
                }
        }
 
-       list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
+       list_add(&page->lru,
+               &zone->free_area[order].free_list[migratetype].list);
 out:
        zone->free_area[order].nr_free++;
 }
@@ -811,7 +812,7 @@ static inline void expand(struct zone *zone, struct page *page,
                        continue;
                }
 #endif
-               list_add(&page[size].lru, &area->free_list[migratetype]);
+               list_add(&page[size].lru, &area->free_list[migratetype].list);
                area->nr_free++;
                set_page_order(&page[size], high);
        }
@@ -873,10 +874,10 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
        /* Find a page of the appropriate size in the preferred list */
        for (current_order = order; current_order < MAX_ORDER; ++current_order) {
                area = &(zone->free_area[current_order]);
-               if (list_empty(&area->free_list[migratetype]))
+               if (list_empty(&area->free_list[migratetype].list))
                        continue;
 
-               page = list_entry(area->free_list[migratetype].next,
+               page = list_entry(area->free_list[migratetype].list.next,
                                                        struct page, lru);
                list_del(&page->lru);
                rmv_page_order(page);
@@ -946,7 +947,7 @@ int move_freepages(struct zone *zone,
 
                order = page_order(page);
                list_move(&page->lru,
-                         &zone->free_area[order].free_list[migratetype]);
+                         &zone->free_area[order].free_list[migratetype].list);
                set_freepage_migratetype(page, migratetype);
                page += 1 << order;
                pages_moved += 1 << order;
@@ -1007,10 +1008,10 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
                                break;
 
                        area = &(zone->free_area[current_order]);
-                       if (list_empty(&area->free_list[migratetype]))
+                       if (list_empty(&area->free_list[migratetype].list))
                                continue;
 
-                       page = list_entry(area->free_list[migratetype].next,
+                       page = list_entry(area->free_list[migratetype].list.next,
                                        struct page, lru);
                        area->nr_free--;
 
@@ -1274,7 +1275,7 @@ void mark_free_pages(struct zone *zone)
                }
 
        for_each_migratetype_order(order, t) {
-               list_for_each(curr, &zone->free_area[order].free_list[t]) {
+               list_for_each(curr, &zone->free_area[order].free_list[t].list) {
                        unsigned long i;
 
                        pfn = page_to_pfn(list_entry(curr, struct page, lru));
@@ -3859,7 +3860,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
 {
        int order, t;
        for_each_migratetype_order(order, t) {
-               INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
+               INIT_LIST_HEAD(&zone->free_area[order].free_list[t].list);
                zone->free_area[order].nr_free = 0;
        }
 }
diff --git a/mm/vmstat.c b/mm/vmstat.c
index c737057..8183331 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -847,7 +847,7 @@ static void pagetypeinfo_showfree_print(struct seq_file *m,
 
                        area = &(zone->free_area[order]);
 
-                       list_for_each(curr, &area->free_list[mtype])
+                       list_for_each(curr, &area->free_list[mtype].list)
                                freecount++;
                        seq_printf(m, "%6lu ", freecount);
                }

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to