Since commit 682a3385e773 ("mm, page_alloc: inline the fast path of the
zonelist iterator") we replace a NULL nodemask with cpuset_current_mems_allowed
in the fast path, so that get_page_from_freelist() filters nodes allowed by the
cpuset via for_next_zone_zonelist_nodemask(). In that case it's pointless to
also check __cpuset_zone_allowed(), which we can avoid by not using
ALLOC_CPUSET in that scenario.

Signed-off-by: Vlastimil Babka <[email protected]>
---
 mm/page_alloc.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2c6d5f64feca..3d86fbe2f4f4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3754,9 +3754,10 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 
        if (cpusets_enabled()) {
                alloc_mask |= __GFP_HARDWALL;
-               alloc_flags |= ALLOC_CPUSET;
                if (!ac.nodemask)
                        ac.nodemask = &cpuset_current_mems_allowed;
+               else
+                       alloc_flags |= ALLOC_CPUSET;
        }
 
        gfp_mask &= gfp_allowed_mask;
-- 
2.11.0

Reply via email to