This is partially a preparation patch for more vmstat work, but it also
has the slight advantage that __count_zid_vm_events() is cheaper to
calculate than __count_zone_vm_events(): it takes the zone index
directly instead of deriving it from a zone pointer with zone_idx().
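
For context on why the zid form is cheaper: the old macro had to
recover the index from the zone pointer via zone_idx(), while the
caller in buffered_rmqueue() already has a cheap index to hand from
page_zonenum(). A rough sketch of the two helpers as they stand in
this era's tree (paraphrased from include/linux/mmzone.h and
include/linux/mm.h; treat the exact shift/mask names as illustrative):

    /* zone_idx() derives the index by pointer subtraction and needs
     * a dereference of zone->zone_pgdat to find the node_zones base:
     */
    #define zone_idx(zone)  ((zone) - (zone)->zone_pgdat->node_zones)

    /* page_zonenum() simply extracts the zone id bits encoded in
     * page->flags; no zone structure access is required:
     */
    static inline enum zone_type page_zonenum(const struct page *page)
    {
            return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
    }

With the zid variant the index computation reduces to compile-time
constants plus an already-available integer, with no extra pointer
chasing at the call site.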

Link: http://lkml.kernel.org/r/1466518566-30034-27-git-send-email-mgor...@techsingularity.net
Signed-off-by: Mel Gorman <mgor...@techsingularity.net>
Acked-by: Vlastimil Babka <vba...@suse.cz>
Cc: Johannes Weiner <han...@cmpxchg.org>
Cc: Rik van Riel <r...@surriel.com>
Signed-off-by: Andrew Morton <a...@linux-foundation.org>
---
 include/linux/vmstat.h | 5 ++---
 mm/page_alloc.c        | 2 +-
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 552d0db4fca2..0e53874a66a9 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -101,9 +101,8 @@ static inline void vm_events_fold_cpu(int cpu)
 #define count_vm_vmacache_event(x) do {} while (0)
 #endif
 
-#define __count_zone_vm_events(item, zone, delta) \
-               __count_vm_events(item##_NORMAL - ZONE_NORMAL + \
-               zone_idx(zone), delta)
+#define __count_zid_vm_events(item, zid, delta) \
+       __count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
 
 /*
  * Zone and node-based page accounting with per cpu differentials.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 69ffaadc31ed..d3eb15c35bb1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2659,7 +2659,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
                                          get_pcppage_migratetype(page));
        }
 
-       __count_zone_vm_events(PGALLOC, zone, 1 << order);
+       __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
        zone_statistics(preferred_zone, zone, gfp_flags);
        local_irq_restore(flags);
 
-- 
2.6.4
