This is partly a preparation patch for more vmstat work, but it also
has the slight advantage that __count_zid_vm_events() is cheaper than
__count_zone_vm_events(): it takes a zone index directly instead of
deriving one from the zone pointer via zone_idx().
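
For illustration, a rough sketch of where the saving comes from; the
definitions below are paraphrased from include/linux/mmzone.h and
include/linux/mm.h and may differ slightly between kernel versions:

	/* zone_idx() is pointer arithmetic through the zone's pgdat */
	#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)

	/* page_zonenum() is a shift and mask of the page flags, which
	 * the caller in buffered_rmqueue() already has in hand
	 */
	static inline enum zone_type page_zonenum(const struct page *page)
	{
		return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
	}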

Signed-off-by: Mel Gorman <mgor...@techsingularity.net>
Acked-by: Vlastimil Babka <vba...@suse.cz>
---
 include/linux/vmstat.h | 5 ++---
 mm/page_alloc.c        | 2 +-
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index c31f8dc6121c..1cab6dd300ac 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -107,9 +107,8 @@ static inline void vm_events_fold_cpu(int cpu)
 #define count_vm_vmacache_event(x) do {} while (0)
 #endif
 
-#define __count_zone_vm_events(item, zone, delta) \
-               __count_vm_events(item##_NORMAL - ZONE_NORMAL + \
-               zone_idx(zone), delta)
+#define __count_zid_vm_events(item, zid, delta) \
+       __count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
 
 /*
  * Zone and node-based page accounting with per cpu differentials.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6d0a527cff3d..f5b4f5a372fc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2621,7 +2621,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
                                          get_pcppage_migratetype(page));
        }
 
-       __count_zone_vm_events(PGALLOC, zone, 1 << order);
+       __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
        zone_statistics(preferred_zone, zone, gfp_flags);
        local_irq_restore(flags);
 
-- 
2.6.4
