If a page is about to be dirtied, the page allocator attempts to limit
the total number of dirty pages that exist in any given node.  The call
to node_dirty_ok() is expensive, so this patch records the last pgdat
examined that hit the dirty limits and skips it when subsequent zones
belong to the same node.  In some cases, this reduces the number of
calls to node_dirty_ok().
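
For illustration only, a minimal user-space sketch of the same caching
idea.  The struct definitions, the toy node_dirty_ok() stand-in and the
counter are invented for this example and are not the kernel's types or
API; the real change is to the zonelist walk in get_page_from_freelist()
shown in the diff below:

  #include <stdio.h>

  /* Toy stand-ins for pgdat/zone; these are not the kernel's types. */
  struct node { int id; int over_dirty_limit; };
  struct zone { struct node *node; };

  static int expensive_checks;

  /* Stand-in for the expensive node_dirty_ok() check. */
  static int node_dirty_ok(struct node *node)
  {
          expensive_checks++;
          return !node->over_dirty_limit;
  }

  int main(void)
  {
          struct node nodes[2] = { { 0, 1 }, { 1, 0 } };
          /* Two zones per node, as in a zonelist walk. */
          struct zone zones[4] = {
                  { &nodes[0] }, { &nodes[0] }, { &nodes[1] }, { &nodes[1] }
          };
          struct node *last_node_dirty_limit = NULL;
          int i;

          for (i = 0; i < 4; i++) {
                  struct zone *zone = &zones[i];

                  /*
                   * A node already known to be over its dirty limit can
                   * be skipped without repeating the expensive check.
                   */
                  if (last_node_dirty_limit == zone->node)
                          continue;

                  if (!node_dirty_ok(zone->node)) {
                          last_node_dirty_limit = zone->node;
                          continue;
                  }
                  printf("zone %d on node %d is usable\n", i, zone->node->id);
          }
          printf("expensive checks: %d (4 without the cache)\n",
                 expensive_checks);
          return 0;
  }

Note that only the last pgdat that failed the check is cached; nodes
that pass node_dirty_ok() are still re-checked for each of their zones,
which matches the behaviour of the patch.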

Signed-off-by: Mel Gorman <mgor...@techsingularity.net>
Acked-by: Vlastimil Babka <vba...@suse.cz>
---
 mm/page_alloc.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 565f08832853..958424fc64be 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2891,6 +2891,8 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 {
        struct zoneref *z = ac->preferred_zoneref;
        struct zone *zone;
+       struct pglist_data *last_pgdat_dirty_limit = NULL;
+
        /*
         * Scan zonelist, looking for a zone with enough free.
         * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
@@ -2923,8 +2925,15 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
                 * will require awareness of nodes in the
                 * dirty-throttling and the flusher threads.
                 */
-               if (ac->spread_dirty_pages && !node_dirty_ok(zone->zone_pgdat))
-                       continue;
+               if (ac->spread_dirty_pages) {
+                       if (last_pgdat_dirty_limit == zone->zone_pgdat)
+                               continue;
+
+                       if (!node_dirty_ok(zone->zone_pgdat)) {
+                               last_pgdat_dirty_limit = zone->zone_pgdat;
+                               continue;
+                       }
+               }
 
                mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
                if (!zone_watermark_fast(zone, order, mark,
-- 
2.6.4
